i965/fs: Add infrastructure for generating CSEL instructions.
[mesa.git] / src / intel / compiler / brw_eu_emit.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "brw_eu_defines.h"
34 #include "brw_eu.h"
35
36 #include "util/ralloc.h"
37
38 /**
39 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
40 * registers, implicitly moving the operand to a message register.
41 *
42 * On Sandybridge, this is no longer the case. This function performs the
43 * explicit move; it should be called before emitting a SEND instruction.
44 */
45 void
46 gen6_resolve_implied_move(struct brw_codegen *p,
47 struct brw_reg *src,
48 unsigned msg_reg_nr)
49 {
50 const struct gen_device_info *devinfo = p->devinfo;
51 if (devinfo->gen < 6)
52 return;
53
54 if (src->file == BRW_MESSAGE_REGISTER_FILE)
55 return;
56
57 if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
58 brw_push_insn_state(p);
59 brw_set_default_exec_size(p, BRW_EXECUTE_8);
60 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
61 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
62 brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
63 retype(*src, BRW_REGISTER_TYPE_UD));
64 brw_pop_insn_state(p);
65 }
66 *src = brw_message_reg(msg_reg_nr);
67 }
68
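/* Illustrative sketch (not part of the upstream file): how a message
 * emitter might use gen6_resolve_implied_move() before emitting SEND.
 * The helper name and the URB descriptor values are hypothetical; the
 * brw_* calls themselves are the ones defined in this file / brw_eu.h.
 */
static void
emit_example_send(struct brw_codegen *p, struct brw_reg payload)
{
   /* On Gen6+, SEND reads the message from MRF-mapped GRF space, so the
    * operand is copied there first (a no-op if it already is an MRF).
    */
   gen6_resolve_implied_move(p, &payload, 1 /* msg_reg_nr */);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, brw_null_reg());
   brw_set_src0(p, send, payload);
   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              1 /* mlen */, 0 /* rlen */,
                              true /* header */, false /* EOT */);
}
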
69 static void
70 gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
71 {
72 /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
73 * "The send with EOT should use register space R112-R127 for <src>. This is
74 * to enable loading of a new thread into the same slot while the message
75 * with EOT for current thread is pending dispatch."
76 *
77 * Since we're pretending to have 16 MRFs anyway, we may as well use the
78 * registers required for messages with EOT.
79 */
80 const struct gen_device_info *devinfo = p->devinfo;
81 if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
82 reg->file = BRW_GENERAL_REGISTER_FILE;
83 reg->nr += GEN7_MRF_HACK_START;
84 }
85 }
86
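/* Illustrative example (values hypothetical): on Gen7+, an operand like
 * m4 is rewritten in place to the GRF that backs it:
 *
 *    struct brw_reg reg = brw_message_reg(4);
 *    gen7_convert_mrf_to_grf(p, &reg);
 *    // now reg.file == BRW_GENERAL_REGISTER_FILE
 *    //     reg.nr   == GEN7_MRF_HACK_START + 4
 */
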
87 void
88 brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
89 {
90 const struct gen_device_info *devinfo = p->devinfo;
91
92 if (dest.file == BRW_MESSAGE_REGISTER_FILE)
93 assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
94 else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
95 assert(dest.nr < 128);
96
97 gen7_convert_mrf_to_grf(p, &dest);
98
99 brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
100 brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);
101
102 if (dest.address_mode == BRW_ADDRESS_DIRECT) {
103 brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);
104
105 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
106 brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
107 if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
108 dest.hstride = BRW_HORIZONTAL_STRIDE_1;
109 brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
110 } else {
111 brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
112 brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
113 if (dest.file == BRW_GENERAL_REGISTER_FILE ||
114 dest.file == BRW_MESSAGE_REGISTER_FILE) {
115 assert(dest.writemask != 0);
116 }
117 /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
118 * Although Dst.HorzStride is a don't care for Align16, HW needs
119 * this to be programmed as "01".
120 */
121 brw_inst_set_dst_hstride(devinfo, inst, 1);
122 }
123 } else {
124 brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);
125
126 /* These are different sizes in align1 vs align16:
127 */
128 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
129 brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
130 dest.indirect_offset);
131 if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
132 dest.hstride = BRW_HORIZONTAL_STRIDE_1;
133 brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
134 } else {
135 brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
136 dest.indirect_offset);
137          /* Even though hstride is ignored in Align16 mode, it still needs to be programmed as '01'. */
138 brw_inst_set_dst_hstride(devinfo, inst, 1);
139 }
140 }
141
142 /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
143 * or 16 (SIMD16), as that's normally correct. However, when dealing with
144 * small registers, it can be useful for us to automatically reduce it to
145 * match the register size.
146 */
147 if (p->automatic_exec_sizes) {
148 /*
149        * On platforms that support fp64 we can emit instructions with a width
150 * of 4 that need two SIMD8 registers and an exec_size of 8 or 16. In
151 * these cases we need to make sure that these instructions have their
152 * exec sizes set properly when they are emitted and we can't rely on
153 * this code to fix it.
154 */
155 bool fix_exec_size;
156 if (devinfo->gen >= 6)
157 fix_exec_size = dest.width < BRW_EXECUTE_4;
158 else
159 fix_exec_size = dest.width < BRW_EXECUTE_8;
160
161 if (fix_exec_size)
162 brw_inst_set_exec_size(devinfo, inst, dest.width);
163 }
164 }
165
166 void
167 brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
168 {
169 const struct gen_device_info *devinfo = p->devinfo;
170
171 if (reg.file == BRW_MESSAGE_REGISTER_FILE)
172 assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
173 else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
174 assert(reg.nr < 128);
175
176 gen7_convert_mrf_to_grf(p, &reg);
177
178 if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
179 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
180 /* Any source modifiers or regions will be ignored, since this just
181 * identifies the MRF/GRF to start reading the message contents from.
182 * Check for some likely failures.
183 */
184 assert(!reg.negate);
185 assert(!reg.abs);
186 assert(reg.address_mode == BRW_ADDRESS_DIRECT);
187 }
188
189 brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
190 brw_inst_set_src0_abs(devinfo, inst, reg.abs);
191 brw_inst_set_src0_negate(devinfo, inst, reg.negate);
192 brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);
193
194 if (reg.file == BRW_IMMEDIATE_VALUE) {
195 if (reg.type == BRW_REGISTER_TYPE_DF ||
196 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
197 brw_inst_set_imm_df(devinfo, inst, reg.df);
198 else if (reg.type == BRW_REGISTER_TYPE_UQ ||
199 reg.type == BRW_REGISTER_TYPE_Q)
200 brw_inst_set_imm_uq(devinfo, inst, reg.u64);
201 else
202 brw_inst_set_imm_ud(devinfo, inst, reg.ud);
203
204 if (type_sz(reg.type) < 8) {
205 brw_inst_set_src1_reg_file(devinfo, inst,
206 BRW_ARCHITECTURE_REGISTER_FILE);
207 brw_inst_set_src1_reg_hw_type(devinfo, inst,
208 brw_inst_src0_reg_hw_type(devinfo, inst));
209 }
210 } else {
211 if (reg.address_mode == BRW_ADDRESS_DIRECT) {
212 brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
213 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
214 brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
215 } else {
216 brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
217 }
218 } else {
219 brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);
220
221 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
222 brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
223 } else {
224 brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
225 }
226 }
227
228 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
229 if (reg.width == BRW_WIDTH_1 &&
230 brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
231 brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
232 brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
233 brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
234 } else {
235 brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
236 brw_inst_set_src0_width(devinfo, inst, reg.width);
237 brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
238 }
239 } else {
240 brw_inst_set_src0_da16_swiz_x(devinfo, inst,
241 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
242 brw_inst_set_src0_da16_swiz_y(devinfo, inst,
243 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
244 brw_inst_set_src0_da16_swiz_z(devinfo, inst,
245 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
246 brw_inst_set_src0_da16_swiz_w(devinfo, inst,
247 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));
248
249 if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
250             /* This is an oddity arising from the fact that we use the same
251              * register descriptions for align_16 as for align_1:
252              */
253 brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
254 } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
255 reg.type == BRW_REGISTER_TYPE_DF &&
256 reg.vstride == BRW_VERTICAL_STRIDE_2) {
257 /* From SNB PRM:
258 *
259 * "For Align16 access mode, only encodings of 0000 and 0011
260 * are allowed. Other codes are reserved."
261 *
262 * Presumably the DevSNB behavior applies to IVB as well.
263 */
264 brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
265 } else {
266 brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
267 }
268 }
269 }
270 }
271
272
273 void
274 brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
275 {
276 const struct gen_device_info *devinfo = p->devinfo;
277
278 if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
279 assert(reg.nr < 128);
280
281 /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
282 *
283 * "Accumulator registers may be accessed explicitly as src0
284 * operands only."
285 */
286 assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
287 reg.nr != BRW_ARF_ACCUMULATOR);
288
289 gen7_convert_mrf_to_grf(p, &reg);
290 assert(reg.file != BRW_MESSAGE_REGISTER_FILE);
291
292 brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
293 brw_inst_set_src1_abs(devinfo, inst, reg.abs);
294 brw_inst_set_src1_negate(devinfo, inst, reg.negate);
295
296    /* Only src1 can be an immediate in two-argument instructions.
297 */
298 assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);
299
300 if (reg.file == BRW_IMMEDIATE_VALUE) {
301 /* two-argument instructions can only use 32-bit immediates */
302 assert(type_sz(reg.type) < 8);
303 brw_inst_set_imm_ud(devinfo, inst, reg.ud);
304 } else {
305 /* This is a hardware restriction, which may or may not be lifted
306 * in the future:
307 */
308       assert(reg.address_mode == BRW_ADDRESS_DIRECT);
309 /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */
310
311 brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
312 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
313 brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
314 } else {
315 brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
316 }
317
318 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
319 if (reg.width == BRW_WIDTH_1 &&
320 brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
321 brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
322 brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
323 brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
324 } else {
325 brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
326 brw_inst_set_src1_width(devinfo, inst, reg.width);
327 brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
328 }
329 } else {
330 brw_inst_set_src1_da16_swiz_x(devinfo, inst,
331 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
332 brw_inst_set_src1_da16_swiz_y(devinfo, inst,
333 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
334 brw_inst_set_src1_da16_swiz_z(devinfo, inst,
335 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
336 brw_inst_set_src1_da16_swiz_w(devinfo, inst,
337 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));
338
339 if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
340             /* This is an oddity arising from the fact that we use the same
341              * register descriptions for align_16 as for align_1:
342              */
343 brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
344 } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
345 reg.type == BRW_REGISTER_TYPE_DF &&
346 reg.vstride == BRW_VERTICAL_STRIDE_2) {
347 /* From SNB PRM:
348 *
349 * "For Align16 access mode, only encodings of 0000 and 0011
350 * are allowed. Other codes are reserved."
351 *
352 * Presumably the DevSNB behavior applies to IVB as well.
353 */
354 brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
355 } else {
356 brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
357 }
358 }
359 }
360 }
361
362 /**
363 * Set the Message Descriptor and Extended Message Descriptor fields
364 * for SEND messages.
365 *
366 * \note This zeroes out the Function Control bits, so it must be called
367 * \b before filling out any message-specific data. Callers can
368 * choose not to fill in irrelevant bits; they will be zero.
369 */
370 void
371 brw_set_message_descriptor(struct brw_codegen *p,
372 brw_inst *inst,
373 enum brw_message_target sfid,
374 unsigned msg_length,
375 unsigned response_length,
376 bool header_present,
377 bool end_of_thread)
378 {
379 const struct gen_device_info *devinfo = p->devinfo;
380
381 brw_set_src1(p, inst, brw_imm_d(0));
382
383 /* For indirect sends, `inst` will not be the SEND/SENDC instruction
384 * itself; instead, it will be a MOV/OR into the address register.
385 *
386 * In this case, we avoid setting the extended message descriptor bits,
387     * since they go on the later SEND/SENDC instead and, if set here,
388     * would clobber the conditional modifier bits.
389 */
390 unsigned opcode = brw_inst_opcode(devinfo, inst);
391 if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC) {
392 brw_inst_set_sfid(devinfo, inst, sfid);
393 }
394
395 brw_inst_set_mlen(devinfo, inst, msg_length);
396 brw_inst_set_rlen(devinfo, inst, response_length);
397 brw_inst_set_eot(devinfo, inst, end_of_thread);
398
399 if (devinfo->gen >= 5) {
400 brw_inst_set_header_present(devinfo, inst, header_present);
401 }
402 }
403
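/* Ordering sketch: because the brw_set_src1(..., brw_imm_d(0)) call
 * above zeroes the whole descriptor, message-specific bits must be set
 * afterwards, e.g. (field values hypothetical):
 *
 *    brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER,
 *                               msg_length, response_length,
 *                               true, false);
 *    brw_inst_set_sampler_msg_type(devinfo, inst, msg_type);
 */
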
404 static void brw_set_math_message( struct brw_codegen *p,
405 brw_inst *inst,
406 unsigned function,
407 unsigned integer_type,
408 bool low_precision,
409 unsigned dataType )
410 {
411 const struct gen_device_info *devinfo = p->devinfo;
412 unsigned msg_length;
413 unsigned response_length;
414
415 /* Infer message length from the function */
416 switch (function) {
417 case BRW_MATH_FUNCTION_POW:
418 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
419 case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
420 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
421 msg_length = 2;
422 break;
423 default:
424 msg_length = 1;
425 break;
426 }
427
428 /* Infer response length from the function */
429 switch (function) {
430 case BRW_MATH_FUNCTION_SINCOS:
431 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
432 response_length = 2;
433 break;
434 default:
435 response_length = 1;
436 break;
437 }
438
439
440 brw_set_message_descriptor(p, inst, BRW_SFID_MATH,
441 msg_length, response_length, false, false);
442 brw_inst_set_math_msg_function(devinfo, inst, function);
443 brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
444 brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
445 brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
446 brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
447 brw_inst_set_saturate(devinfo, inst, 0);
448 }
449
450
451 static void brw_set_ff_sync_message(struct brw_codegen *p,
452 brw_inst *insn,
453 bool allocate,
454 unsigned response_length,
455 bool end_of_thread)
456 {
457 const struct gen_device_info *devinfo = p->devinfo;
458
459 brw_set_message_descriptor(p, insn, BRW_SFID_URB,
460 1, response_length, true, end_of_thread);
461 brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
462 brw_inst_set_urb_allocate(devinfo, insn, allocate);
463 /* The following fields are not used by FF_SYNC: */
464 brw_inst_set_urb_global_offset(devinfo, insn, 0);
465 brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
466 brw_inst_set_urb_used(devinfo, insn, 0);
467 brw_inst_set_urb_complete(devinfo, insn, 0);
468 }
469
470 static void brw_set_urb_message( struct brw_codegen *p,
471 brw_inst *insn,
472 enum brw_urb_write_flags flags,
473 unsigned msg_length,
474 unsigned response_length,
475 unsigned offset,
476 unsigned swizzle_control )
477 {
478 const struct gen_device_info *devinfo = p->devinfo;
479
480 assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
481 assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
482 assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
483
484 brw_set_message_descriptor(p, insn, BRW_SFID_URB,
485 msg_length, response_length, true,
486 flags & BRW_URB_WRITE_EOT);
487
488 if (flags & BRW_URB_WRITE_OWORD) {
489 assert(msg_length == 2); /* header + one OWORD of data */
490 brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
491 } else {
492 brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
493 }
494
495 brw_inst_set_urb_global_offset(devinfo, insn, offset);
496 brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);
497
498 if (devinfo->gen < 8) {
499 brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
500 }
501
502 if (devinfo->gen < 7) {
503 brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
504 brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
505 } else {
506 brw_inst_set_urb_per_slot_offset(devinfo, insn,
507 !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
508 }
509 }
510
511 void
512 brw_set_dp_write_message(struct brw_codegen *p,
513 brw_inst *insn,
514 unsigned binding_table_index,
515 unsigned msg_control,
516 unsigned msg_type,
517 unsigned target_cache,
518 unsigned msg_length,
519 bool header_present,
520 unsigned last_render_target,
521 unsigned response_length,
522 unsigned end_of_thread,
523 unsigned send_commit_msg)
524 {
525 const struct gen_device_info *devinfo = p->devinfo;
526 const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
527 BRW_SFID_DATAPORT_WRITE);
528
529 brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
530 header_present, end_of_thread);
531
532 brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
533 brw_inst_set_dp_write_msg_type(devinfo, insn, msg_type);
534 brw_inst_set_dp_write_msg_control(devinfo, insn, msg_control);
535 brw_inst_set_rt_last(devinfo, insn, last_render_target);
536 if (devinfo->gen < 7) {
537 brw_inst_set_dp_write_commit(devinfo, insn, send_commit_msg);
538 }
539 }
540
541 void
542 brw_set_dp_read_message(struct brw_codegen *p,
543 brw_inst *insn,
544 unsigned binding_table_index,
545 unsigned msg_control,
546 unsigned msg_type,
547 unsigned target_cache,
548 unsigned msg_length,
549 bool header_present,
550 unsigned response_length)
551 {
552 const struct gen_device_info *devinfo = p->devinfo;
553 const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
554 BRW_SFID_DATAPORT_READ);
555
556 brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
557 header_present, false);
558
559 brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
560 brw_inst_set_dp_read_msg_type(devinfo, insn, msg_type);
561 brw_inst_set_dp_read_msg_control(devinfo, insn, msg_control);
562 if (devinfo->gen < 6)
563 brw_inst_set_dp_read_target_cache(devinfo, insn, target_cache);
564 }
565
566 void
567 brw_set_sampler_message(struct brw_codegen *p,
568 brw_inst *inst,
569 unsigned binding_table_index,
570 unsigned sampler,
571 unsigned msg_type,
572 unsigned response_length,
573 unsigned msg_length,
574 unsigned header_present,
575 unsigned simd_mode,
576 unsigned return_format)
577 {
578 const struct gen_device_info *devinfo = p->devinfo;
579
580 brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER, msg_length,
581 response_length, header_present, false);
582
583 brw_inst_set_binding_table_index(devinfo, inst, binding_table_index);
584 brw_inst_set_sampler(devinfo, inst, sampler);
585 brw_inst_set_sampler_msg_type(devinfo, inst, msg_type);
586 if (devinfo->gen >= 5) {
587 brw_inst_set_sampler_simd_mode(devinfo, inst, simd_mode);
588 } else if (devinfo->gen == 4 && !devinfo->is_g4x) {
589 brw_inst_set_sampler_return_format(devinfo, inst, return_format);
590 }
591 }
592
593 static void
594 gen7_set_dp_scratch_message(struct brw_codegen *p,
595 brw_inst *inst,
596 bool write,
597 bool dword,
598 bool invalidate_after_read,
599 unsigned num_regs,
600 unsigned addr_offset,
601 unsigned mlen,
602 unsigned rlen,
603 bool header_present)
604 {
605 const struct gen_device_info *devinfo = p->devinfo;
606 assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
607 (devinfo->gen >= 8 && num_regs == 8));
608 const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
609 num_regs - 1);
610
611 brw_set_message_descriptor(p, inst, GEN7_SFID_DATAPORT_DATA_CACHE,
612 mlen, rlen, header_present, false);
613 brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
614 brw_inst_set_scratch_read_write(devinfo, inst, write);
615 brw_inst_set_scratch_type(devinfo, inst, dword);
616 brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
617 brw_inst_set_scratch_block_size(devinfo, inst, block_size);
618 brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
619 }
620
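/* Worked example for the Block Size encoding above: num_regs == 4 is
 * encoded as _mesa_logbase2(4) == 2 on Gen8+, but as 4 - 1 == 3 on
 * Gen7.x.
 */
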
621 #define next_insn brw_next_insn
622 brw_inst *
623 brw_next_insn(struct brw_codegen *p, unsigned opcode)
624 {
625 const struct gen_device_info *devinfo = p->devinfo;
626 brw_inst *insn;
627
628 if (p->nr_insn + 1 > p->store_size) {
629 p->store_size <<= 1;
630 p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
631 }
632
633 p->next_insn_offset += 16;
634 insn = &p->store[p->nr_insn++];
635 memcpy(insn, p->current, sizeof(*insn));
636
637 brw_inst_set_opcode(devinfo, insn, opcode);
638 return insn;
639 }
640
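/* Illustrative caveat: since reralloc() above may move p->store, raw
 * brw_inst pointers can be invalidated by any later next_insn() call.
 * Code that needs to refer back to earlier instructions therefore keeps
 * indices into p->store rather than pointers (see push_if_stack()).
 */
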
641 static brw_inst *
642 brw_alu1(struct brw_codegen *p, unsigned opcode,
643 struct brw_reg dest, struct brw_reg src)
644 {
645 brw_inst *insn = next_insn(p, opcode);
646 brw_set_dest(p, insn, dest);
647 brw_set_src0(p, insn, src);
648 return insn;
649 }
650
651 static brw_inst *
652 brw_alu2(struct brw_codegen *p, unsigned opcode,
653 struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
654 {
655 /* 64-bit immediates are only supported on 1-src instructions */
656 assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
657 assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);
658
659 brw_inst *insn = next_insn(p, opcode);
660 brw_set_dest(p, insn, dest);
661 brw_set_src0(p, insn, src0);
662 brw_set_src1(p, insn, src1);
663 return insn;
664 }
665
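/* Illustrative note: a 64-bit immediate such as brw_imm_df(1.0) is only
 * legal on one-source paths (e.g. brw_MOV); passing one as src0 or src1
 * of a two-source op like brw_ADD would trip the asserts above.
 */
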
666 static int
667 get_3src_subreg_nr(struct brw_reg reg)
668 {
669 /* Normally, SubRegNum is in bytes (0..31). However, 3-src instructions
670 * use 32-bit units (components 0..7). Since they only support F/D/UD
671 * types, this doesn't lose any flexibility, but uses fewer bits.
672 */
673 return reg.subnr / 4;
674 }
675
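/* Worked example: a byte SubRegNum of 8 becomes component 2 here, since
 * the F/D/UD types used by 3-src instructions are 4 bytes wide
 * (8 / 4 == 2).
 */
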
676 static enum gen10_align1_3src_vertical_stride
677 to_3src_align1_vstride(enum brw_vertical_stride vstride)
678 {
679 switch (vstride) {
680 case BRW_VERTICAL_STRIDE_0:
681 return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
682 case BRW_VERTICAL_STRIDE_2:
683 return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
684 case BRW_VERTICAL_STRIDE_4:
685 return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
686 case BRW_VERTICAL_STRIDE_8:
687 case BRW_VERTICAL_STRIDE_16:
688 return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
689 default:
690 unreachable("invalid vstride");
691 }
692 }
693
694
695 static enum gen10_align1_3src_src_horizontal_stride
696 to_3src_align1_hstride(enum brw_horizontal_stride hstride)
697 {
698 switch (hstride) {
699 case BRW_HORIZONTAL_STRIDE_0:
700 return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
701 case BRW_HORIZONTAL_STRIDE_1:
702 return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
703 case BRW_HORIZONTAL_STRIDE_2:
704 return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
705 case BRW_HORIZONTAL_STRIDE_4:
706 return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
707 default:
708 unreachable("invalid hstride");
709 }
710 }
711
712 static brw_inst *
713 brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
714 struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
715 {
716 const struct gen_device_info *devinfo = p->devinfo;
717 brw_inst *inst = next_insn(p, opcode);
718
719 gen7_convert_mrf_to_grf(p, &dest);
720
721 assert(dest.nr < 128);
722 assert(src0.nr < 128);
723 assert(src1.nr < 128);
724 assert(src2.nr < 128);
725 assert(dest.address_mode == BRW_ADDRESS_DIRECT);
726 assert(src0.address_mode == BRW_ADDRESS_DIRECT);
727 assert(src1.address_mode == BRW_ADDRESS_DIRECT);
728 assert(src2.address_mode == BRW_ADDRESS_DIRECT);
729
730 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
731 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
732 dest.file == BRW_ARCHITECTURE_REGISTER_FILE);
733
734 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
735 brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
736 BRW_ALIGN1_3SRC_ACCUMULATOR);
737 brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
738 } else {
739 brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
740 BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
741 brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
742 }
743 brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);
744
745 brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);
746
747 if (brw_reg_type_is_floating_point(dest.type)) {
748 brw_inst_set_3src_a1_exec_type(devinfo, inst,
749 BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
750 } else {
751 brw_inst_set_3src_a1_exec_type(devinfo, inst,
752 BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
753 }
754
755 brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
756 brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
757 brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
758 brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);
759
760 brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
761 to_3src_align1_vstride(src0.vstride));
762 brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
763 to_3src_align1_vstride(src1.vstride));
764 /* no vstride on src2 */
765
766 brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
767 to_3src_align1_hstride(src0.hstride));
768 brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
769 to_3src_align1_hstride(src1.hstride));
770 brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
771 to_3src_align1_hstride(src2.hstride));
772
773 brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
774 if (src0.type == BRW_REGISTER_TYPE_NF) {
775 brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
776 } else {
777 brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
778 }
779 brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
780 brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
781
782 brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
783 if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
784 brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
785 } else {
786 brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
787 }
788 brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
789 brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
790
791 brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
792 brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
793 brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
794 brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
795
796 assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
797 src0.file == BRW_IMMEDIATE_VALUE ||
798 (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
799 src0.type == BRW_REGISTER_TYPE_NF));
800 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
801 src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
802 assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
803 src2.file == BRW_IMMEDIATE_VALUE);
804
805 brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
806 src0.file == BRW_GENERAL_REGISTER_FILE ?
807 BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
808 BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
809 brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
810 src1.file == BRW_GENERAL_REGISTER_FILE ?
811 BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
812 BRW_ALIGN1_3SRC_ACCUMULATOR);
813 brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
814 src2.file == BRW_GENERAL_REGISTER_FILE ?
815 BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
816 BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
817 } else {
818 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
819 dest.file == BRW_MESSAGE_REGISTER_FILE);
820 assert(dest.type == BRW_REGISTER_TYPE_F ||
821 dest.type == BRW_REGISTER_TYPE_DF ||
822 dest.type == BRW_REGISTER_TYPE_D ||
823 dest.type == BRW_REGISTER_TYPE_UD);
824 if (devinfo->gen == 6) {
825 brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
826 dest.file == BRW_MESSAGE_REGISTER_FILE);
827 }
828 brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
829 brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
830 brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);
831
832 assert(src0.file == BRW_GENERAL_REGISTER_FILE);
833 brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
834 brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
835 brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
836 brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
837 brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
838 brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
839 src0.vstride == BRW_VERTICAL_STRIDE_0);
840
841 assert(src1.file == BRW_GENERAL_REGISTER_FILE);
842 brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
843 brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
844 brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
845 brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
846 brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
847 brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
848 src1.vstride == BRW_VERTICAL_STRIDE_0);
849
850 assert(src2.file == BRW_GENERAL_REGISTER_FILE);
851 brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
852 brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
853 brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
854 brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
855 brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
856 brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
857 src2.vstride == BRW_VERTICAL_STRIDE_0);
858
859 if (devinfo->gen >= 7) {
860 /* Set both the source and destination types based on dest.type,
861 * ignoring the source register types. The MAD and LRP emitters ensure
862 * that all four types are float. The BFE and BFI2 emitters, however,
863 * may send us mixed D and UD types and want us to ignore that and use
864 * the destination type.
865 */
866 brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
867 brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
868 }
869 }
870
871 return inst;
872 }
873
874
875 /***********************************************************************
876 * Convenience routines.
877 */
878 #define ALU1(OP) \
879 brw_inst *brw_##OP(struct brw_codegen *p, \
880 struct brw_reg dest, \
881 struct brw_reg src0) \
882 { \
883 return brw_alu1(p, BRW_OPCODE_##OP, dest, src0); \
884 }
885
886 #define ALU2(OP) \
887 brw_inst *brw_##OP(struct brw_codegen *p, \
888 struct brw_reg dest, \
889 struct brw_reg src0, \
890 struct brw_reg src1) \
891 { \
892 return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1); \
893 }
894
895 #define ALU3(OP) \
896 brw_inst *brw_##OP(struct brw_codegen *p, \
897 struct brw_reg dest, \
898 struct brw_reg src0, \
899 struct brw_reg src1, \
900 struct brw_reg src2) \
901 { \
902 return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
903 }
904
905 #define ALU3F(OP) \
906 brw_inst *brw_##OP(struct brw_codegen *p, \
907 struct brw_reg dest, \
908 struct brw_reg src0, \
909 struct brw_reg src1, \
910 struct brw_reg src2) \
911 { \
912 assert(dest.type == BRW_REGISTER_TYPE_F || \
913 dest.type == BRW_REGISTER_TYPE_DF); \
914 if (dest.type == BRW_REGISTER_TYPE_F) { \
915 assert(src0.type == BRW_REGISTER_TYPE_F); \
916 assert(src1.type == BRW_REGISTER_TYPE_F); \
917 assert(src2.type == BRW_REGISTER_TYPE_F); \
918 } else if (dest.type == BRW_REGISTER_TYPE_DF) { \
919 assert(src0.type == BRW_REGISTER_TYPE_DF); \
920 assert(src1.type == BRW_REGISTER_TYPE_DF); \
921 assert(src2.type == BRW_REGISTER_TYPE_DF); \
922 } \
923 return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
924 }
925
926 /* Rounding operations (other than RNDD) require two instructions - the first
927 * stores a rounded value (possibly the wrong way) in the dest register, but
928 * also sets a per-channel "increment bit" in the flag register. A predicated
929 * add of 1.0 fixes dest to contain the desired result.
930 *
931 * Sandybridge and later appear to round correctly without an ADD.
932 */
933 #define ROUND(OP) \
934 void brw_##OP(struct brw_codegen *p, \
935 struct brw_reg dest, \
936 struct brw_reg src) \
937 { \
938 const struct gen_device_info *devinfo = p->devinfo; \
939 brw_inst *rnd, *add; \
940 rnd = next_insn(p, BRW_OPCODE_##OP); \
941 brw_set_dest(p, rnd, dest); \
942 brw_set_src0(p, rnd, src); \
943 \
944 if (devinfo->gen < 6) { \
945 /* turn on round-increments */ \
946 brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R); \
947 add = brw_ADD(p, dest, dest, brw_imm_f(1.0f)); \
948 brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
949 } \
950 }
951
952
953 ALU2(SEL)
954 ALU1(NOT)
955 ALU2(AND)
956 ALU2(OR)
957 ALU2(XOR)
958 ALU2(SHR)
959 ALU2(SHL)
960 ALU1(DIM)
961 ALU2(ASR)
962 ALU3(CSEL)
963 ALU1(FRC)
964 ALU1(RNDD)
965 ALU2(MAC)
966 ALU2(MACH)
967 ALU1(LZD)
968 ALU2(DP4)
969 ALU2(DPH)
970 ALU2(DP3)
971 ALU2(DP2)
972 ALU3(MAD)
973 ALU3F(LRP)
974 ALU1(BFREV)
975 ALU3(BFE)
976 ALU2(BFI1)
977 ALU3(BFI2)
978 ALU1(FBH)
979 ALU1(FBL)
980 ALU1(CBIT)
981 ALU2(ADDC)
982 ALU2(SUBB)
983
984 ROUND(RNDZ)
985 ROUND(RNDE)
986
987 brw_inst *
988 brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
989 {
990 const struct gen_device_info *devinfo = p->devinfo;
991
992 /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
993 * To avoid the problems that causes, we use a <1,2,0> source region to read
994 * each element twice.
995 */
996 if (devinfo->gen == 7 && !devinfo->is_haswell &&
997 brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1 &&
998 dest.type == BRW_REGISTER_TYPE_DF &&
999 (src0.type == BRW_REGISTER_TYPE_F ||
1000 src0.type == BRW_REGISTER_TYPE_D ||
1001 src0.type == BRW_REGISTER_TYPE_UD) &&
1002 !has_scalar_region(src0)) {
1003 assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
1004 src0.width == BRW_WIDTH_4 &&
1005 src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1006
1007 src0.vstride = BRW_VERTICAL_STRIDE_1;
1008 src0.width = BRW_WIDTH_2;
1009 src0.hstride = BRW_HORIZONTAL_STRIDE_0;
1010 }
1011
1012 return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
1013 }
1014
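/* Illustrative note on the IVB/BYT workaround above: rewriting a
 * <4,4,1> source region as <1,2,0> makes each 32-bit element be read
 * twice, i.e. channels f0 f1 f2 f3 are fetched as f0 f0 f1 f1 f2 f2
 * f3 f3, so the odd channels the conversion ignores are duplicates.
 */
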
1015 brw_inst *
1016 brw_ADD(struct brw_codegen *p, struct brw_reg dest,
1017 struct brw_reg src0, struct brw_reg src1)
1018 {
1019 /* 6.2.2: add */
1020 if (src0.type == BRW_REGISTER_TYPE_F ||
1021 (src0.file == BRW_IMMEDIATE_VALUE &&
1022 src0.type == BRW_REGISTER_TYPE_VF)) {
1023 assert(src1.type != BRW_REGISTER_TYPE_UD);
1024 assert(src1.type != BRW_REGISTER_TYPE_D);
1025 }
1026
1027 if (src1.type == BRW_REGISTER_TYPE_F ||
1028 (src1.file == BRW_IMMEDIATE_VALUE &&
1029 src1.type == BRW_REGISTER_TYPE_VF)) {
1030 assert(src0.type != BRW_REGISTER_TYPE_UD);
1031 assert(src0.type != BRW_REGISTER_TYPE_D);
1032 }
1033
1034 return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
1035 }
1036
1037 brw_inst *
1038 brw_AVG(struct brw_codegen *p, struct brw_reg dest,
1039 struct brw_reg src0, struct brw_reg src1)
1040 {
1041 assert(dest.type == src0.type);
1042 assert(src0.type == src1.type);
1043 switch (src0.type) {
1044 case BRW_REGISTER_TYPE_B:
1045 case BRW_REGISTER_TYPE_UB:
1046 case BRW_REGISTER_TYPE_W:
1047 case BRW_REGISTER_TYPE_UW:
1048 case BRW_REGISTER_TYPE_D:
1049 case BRW_REGISTER_TYPE_UD:
1050 break;
1051 default:
1052 unreachable("Bad type for brw_AVG");
1053 }
1054
1055 return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
1056 }
1057
1058 brw_inst *
1059 brw_MUL(struct brw_codegen *p, struct brw_reg dest,
1060 struct brw_reg src0, struct brw_reg src1)
1061 {
1062 /* 6.32.38: mul */
1063 if (src0.type == BRW_REGISTER_TYPE_D ||
1064 src0.type == BRW_REGISTER_TYPE_UD ||
1065 src1.type == BRW_REGISTER_TYPE_D ||
1066 src1.type == BRW_REGISTER_TYPE_UD) {
1067 assert(dest.type != BRW_REGISTER_TYPE_F);
1068 }
1069
1070 if (src0.type == BRW_REGISTER_TYPE_F ||
1071 (src0.file == BRW_IMMEDIATE_VALUE &&
1072 src0.type == BRW_REGISTER_TYPE_VF)) {
1073 assert(src1.type != BRW_REGISTER_TYPE_UD);
1074 assert(src1.type != BRW_REGISTER_TYPE_D);
1075 }
1076
1077 if (src1.type == BRW_REGISTER_TYPE_F ||
1078 (src1.file == BRW_IMMEDIATE_VALUE &&
1079 src1.type == BRW_REGISTER_TYPE_VF)) {
1080 assert(src0.type != BRW_REGISTER_TYPE_UD);
1081 assert(src0.type != BRW_REGISTER_TYPE_D);
1082 }
1083
1084 assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
1085 src0.nr != BRW_ARF_ACCUMULATOR);
1086 assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
1087 src1.nr != BRW_ARF_ACCUMULATOR);
1088
1089 return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
1090 }
1091
1092 brw_inst *
1093 brw_LINE(struct brw_codegen *p, struct brw_reg dest,
1094 struct brw_reg src0, struct brw_reg src1)
1095 {
1096 src0.vstride = BRW_VERTICAL_STRIDE_0;
1097 src0.width = BRW_WIDTH_1;
1098 src0.hstride = BRW_HORIZONTAL_STRIDE_0;
1099 return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
1100 }
1101
1102 brw_inst *
1103 brw_PLN(struct brw_codegen *p, struct brw_reg dest,
1104 struct brw_reg src0, struct brw_reg src1)
1105 {
1106 src0.vstride = BRW_VERTICAL_STRIDE_0;
1107 src0.width = BRW_WIDTH_1;
1108 src0.hstride = BRW_HORIZONTAL_STRIDE_0;
1109 src1.vstride = BRW_VERTICAL_STRIDE_8;
1110 src1.width = BRW_WIDTH_8;
1111 src1.hstride = BRW_HORIZONTAL_STRIDE_1;
1112 return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
1113 }
1114
1115 brw_inst *
1116 brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
1117 {
1118 const struct gen_device_info *devinfo = p->devinfo;
1119 const bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;
1120 /* The F32TO16 instruction doesn't support 32-bit destination types in
1121 * Align1 mode, and neither does the Gen8 implementation in terms of a
1122 * converting MOV. Gen7 does zero out the high 16 bits in Align16 mode as
1123 * an undocumented feature.
1124 */
1125 const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
1126 (!align16 || devinfo->gen >= 8));
1127 brw_inst *inst;
1128
1129 if (align16) {
1130 assert(dst.type == BRW_REGISTER_TYPE_UD);
1131 } else {
1132 assert(dst.type == BRW_REGISTER_TYPE_UD ||
1133 dst.type == BRW_REGISTER_TYPE_W ||
1134 dst.type == BRW_REGISTER_TYPE_UW ||
1135 dst.type == BRW_REGISTER_TYPE_HF);
1136 }
1137
1138 brw_push_insn_state(p);
1139
1140 if (needs_zero_fill) {
1141 brw_set_default_access_mode(p, BRW_ALIGN_1);
1142 dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
1143 }
1144
1145 if (devinfo->gen >= 8) {
1146 inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
1147 } else {
1148 assert(devinfo->gen == 7);
1149 inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
1150 }
1151
1152 if (needs_zero_fill) {
1153 brw_inst_set_no_dd_clear(devinfo, inst, true);
1154 inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
1155 brw_inst_set_no_dd_check(devinfo, inst, true);
1156 }
1157
1158 brw_pop_insn_state(p);
1159 return inst;
1160 }
1161
1162 brw_inst *
1163 brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
1164 {
1165 const struct gen_device_info *devinfo = p->devinfo;
1166 bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;
1167
1168 if (align16) {
1169 assert(src.type == BRW_REGISTER_TYPE_UD);
1170 } else {
1171 /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
1172 *
1173 * Because this instruction does not have a 16-bit floating-point
1174 * type, the source data type must be Word (W). The destination type
1175 * must be F (Float).
1176 */
1177 if (src.type == BRW_REGISTER_TYPE_UD)
1178 src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);
1179
1180 assert(src.type == BRW_REGISTER_TYPE_W ||
1181 src.type == BRW_REGISTER_TYPE_UW ||
1182 src.type == BRW_REGISTER_TYPE_HF);
1183 }
1184
1185 if (devinfo->gen >= 8) {
1186 return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
1187 } else {
1188 assert(devinfo->gen == 7);
1189 return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
1190 }
1191 }
1192
1193
1194 void brw_NOP(struct brw_codegen *p)
1195 {
1196 brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
1197 memset(insn, 0, sizeof(*insn));
1198 brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
1199 }
1200
1201
1202
1203
1204
1205 /***********************************************************************
1206 * Comparisons, if/else/endif
1207 */
1208
1209 brw_inst *
1210 brw_JMPI(struct brw_codegen *p, struct brw_reg index,
1211 unsigned predicate_control)
1212 {
1213 const struct gen_device_info *devinfo = p->devinfo;
1214 struct brw_reg ip = brw_ip_reg();
1215 brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);
1216
1217 brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
1218 brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
1219 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
1220 brw_inst_set_pred_control(devinfo, inst, predicate_control);
1221
1222 return inst;
1223 }
1224
1225 static void
1226 push_if_stack(struct brw_codegen *p, brw_inst *inst)
1227 {
1228 p->if_stack[p->if_stack_depth] = inst - p->store;
1229
1230 p->if_stack_depth++;
1231 if (p->if_stack_array_size <= p->if_stack_depth) {
1232 p->if_stack_array_size *= 2;
1233 p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
1234 p->if_stack_array_size);
1235 }
1236 }
1237
1238 static brw_inst *
1239 pop_if_stack(struct brw_codegen *p)
1240 {
1241 p->if_stack_depth--;
1242 return &p->store[p->if_stack[p->if_stack_depth]];
1243 }
1244
1245 static void
1246 push_loop_stack(struct brw_codegen *p, brw_inst *inst)
1247 {
1248 if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
1249 p->loop_stack_array_size *= 2;
1250 p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
1251 p->loop_stack_array_size);
1252 p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
1253 p->loop_stack_array_size);
1254 }
1255
1256 p->loop_stack[p->loop_stack_depth] = inst - p->store;
1257 p->loop_stack_depth++;
1258 p->if_depth_in_loop[p->loop_stack_depth] = 0;
1259 }
1260
1261 static brw_inst *
1262 get_inner_do_insn(struct brw_codegen *p)
1263 {
1264 return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
1265 }
1266
1267 /* EU takes the value from the flag register and pushes it onto some
1268 * sort of a stack (presumably merging with any flag value already on
1269 * the stack). Within an if block, the flags at the top of the stack
1270  * control execution on each channel of the unit, e.g. on each of the
1271 * 16 pixel values in our wm programs.
1272 *
1273 * When the matching 'else' instruction is reached (presumably by
1274 * countdown of the instruction count patched in by our ELSE/ENDIF
1275 * functions), the relevant flags are inverted.
1276 *
1277 * When the matching 'endif' instruction is reached, the flags are
1278 * popped off. If the stack is now empty, normal execution resumes.
1279 */
1280 brw_inst *
1281 brw_IF(struct brw_codegen *p, unsigned execute_size)
1282 {
1283 const struct gen_device_info *devinfo = p->devinfo;
1284 brw_inst *insn;
1285
1286 insn = next_insn(p, BRW_OPCODE_IF);
1287
1288 /* Override the defaults for this instruction:
1289 */
1290 if (devinfo->gen < 6) {
1291 brw_set_dest(p, insn, brw_ip_reg());
1292 brw_set_src0(p, insn, brw_ip_reg());
1293 brw_set_src1(p, insn, brw_imm_d(0x0));
1294 } else if (devinfo->gen == 6) {
1295 brw_set_dest(p, insn, brw_imm_w(0));
1296 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1297 brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1298 brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1299 } else if (devinfo->gen == 7) {
1300 brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1301 brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1302 brw_set_src1(p, insn, brw_imm_w(0));
1303 brw_inst_set_jip(devinfo, insn, 0);
1304 brw_inst_set_uip(devinfo, insn, 0);
1305 } else {
1306 brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1307 brw_set_src0(p, insn, brw_imm_d(0));
1308 brw_inst_set_jip(devinfo, insn, 0);
1309 brw_inst_set_uip(devinfo, insn, 0);
1310 }
1311
1312 brw_inst_set_exec_size(devinfo, insn, execute_size);
1313 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1314 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
1315 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1316 if (!p->single_program_flow && devinfo->gen < 6)
1317 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1318
1319 push_if_stack(p, insn);
1320 p->if_depth_in_loop[p->loop_stack_depth]++;
1321 return insn;
1322 }
1323
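/* Usage sketch (hypothetical operands): a full predicated block built
 * with this API. brw_CMP() sets f0.0, which the following brw_IF()
 * consumes; brw_ENDIF() later patches the jump targets.
 */
static void
emit_example_if_else(struct brw_codegen *p)
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ,
           brw_vec8_grf(2, 0), brw_imm_f(0.0f));
   brw_IF(p, BRW_EXECUTE_8);
   brw_MOV(p, brw_vec8_grf(4, 0), brw_imm_f(1.0f));  /* "then" half */
   brw_ELSE(p);
   brw_MOV(p, brw_vec8_grf(4, 0), brw_imm_f(0.0f));  /* "else" half */
   brw_ENDIF(p);   /* pops the stack and patches the jump targets */
}
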
1324 /* This function is only used for gen6-style IF instructions with an
1325 * embedded comparison (conditional modifier). It is not used on gen7.
1326 */
1327 brw_inst *
1328 gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
1329 struct brw_reg src0, struct brw_reg src1)
1330 {
1331 const struct gen_device_info *devinfo = p->devinfo;
1332 brw_inst *insn;
1333
1334 insn = next_insn(p, BRW_OPCODE_IF);
1335
1336 brw_set_dest(p, insn, brw_imm_w(0));
1337 brw_inst_set_exec_size(devinfo, insn,
1338 brw_inst_exec_size(devinfo, p->current));
1339 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1340 brw_set_src0(p, insn, src0);
1341 brw_set_src1(p, insn, src1);
1342
1343 assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
1344 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1345 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1346
1347 push_if_stack(p, insn);
1348 return insn;
1349 }
1350
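/* Usage sketch (operands hypothetical): on gen6 the comparison can be
 * folded into the IF itself,
 *
 *    gen6_IF(p, BRW_CONDITIONAL_NZ, brw_vec8_grf(2, 0), brw_imm_f(0.0f));
 *
 * instead of emitting a separate CMP followed by brw_IF().
 */
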
1351 /**
1352 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
1353 */
1354 static void
1355 convert_IF_ELSE_to_ADD(struct brw_codegen *p,
1356 brw_inst *if_inst, brw_inst *else_inst)
1357 {
1358 const struct gen_device_info *devinfo = p->devinfo;
1359
1360 /* The next instruction (where the ENDIF would be, if it existed) */
1361 brw_inst *next_inst = &p->store[p->nr_insn];
1362
1363 assert(p->single_program_flow);
1364 assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
1365 assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
1366 assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);
1367
1368 /* Convert IF to an ADD instruction that moves the instruction pointer
1369 * to the first instruction of the ELSE block. If there is no ELSE
1370 * block, point to where ENDIF would be. Reverse the predicate.
1371 *
1372 * There's no need to execute an ENDIF since we don't need to do any
1373 * stack operations, and if we're currently executing, we just want to
1374 * continue normally.
1375 */
1376 brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
1377 brw_inst_set_pred_inv(devinfo, if_inst, true);
1378
1379 if (else_inst != NULL) {
1380 /* Convert ELSE to an ADD instruction that points where the ENDIF
1381 * would be.
1382 */
1383 brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);
1384
1385 brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
1386 brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
1387 } else {
1388 brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
1389 }
1390 }
1391
1392 /**
1393 * Patch IF and ELSE instructions with appropriate jump targets.
1394 */
1395 static void
1396 patch_IF_ELSE(struct brw_codegen *p,
1397 brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
1398 {
1399 const struct gen_device_info *devinfo = p->devinfo;
1400
1401 /* We shouldn't be patching IF and ELSE instructions in single program flow
1402 * mode when gen < 6, because in single program flow mode on those
1403 * platforms, we convert flow control instructions to conditional ADDs that
1404 * operate on IP (see brw_ENDIF).
1405 *
1406 * However, on Gen6, writing to IP doesn't work in single program flow mode
1407 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1408 * not be updated by non-flow control instructions."). And on later
1409 * platforms, there is no significant benefit to converting control flow
1410 * instructions to conditional ADDs. So we do patch IF and ELSE
1411 * instructions in single program flow mode on those platforms.
1412 */
1413 if (devinfo->gen < 6)
1414 assert(!p->single_program_flow);
1415
1416 assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
1417 assert(endif_inst != NULL);
1418 assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
1419
1420 unsigned br = brw_jump_scale(devinfo);
1421
1422 assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
1423 brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));
1424
1425 if (else_inst == NULL) {
1426 /* Patch IF -> ENDIF */
1427 if (devinfo->gen < 6) {
1428 /* Turn it into an IFF, which means no mask stack operations for
1429 * all-false and jumping past the ENDIF.
1430 */
1431 brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
1432 brw_inst_set_gen4_jump_count(devinfo, if_inst,
1433 br * (endif_inst - if_inst + 1));
1434 brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
1435 } else if (devinfo->gen == 6) {
1436 /* As of gen6, there is no IFF and IF must point to the ENDIF. */
1437 brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
1438 } else {
1439 brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
1440 brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
1441 }
1442 } else {
1443 brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));
1444
1445 /* Patch IF -> ELSE */
1446 if (devinfo->gen < 6) {
1447 brw_inst_set_gen4_jump_count(devinfo, if_inst,
1448 br * (else_inst - if_inst));
1449 brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
1450 } else if (devinfo->gen == 6) {
1451 brw_inst_set_gen6_jump_count(devinfo, if_inst,
1452 br * (else_inst - if_inst + 1));
1453 }
1454
1455 /* Patch ELSE -> ENDIF */
1456 if (devinfo->gen < 6) {
1457 /* BRW_OPCODE_ELSE pre-gen6 should point just past the
1458 * matching ENDIF.
1459 */
1460 brw_inst_set_gen4_jump_count(devinfo, else_inst,
1461 br * (endif_inst - else_inst + 1));
1462 brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
1463 } else if (devinfo->gen == 6) {
1464 /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
1465 brw_inst_set_gen6_jump_count(devinfo, else_inst,
1466 br * (endif_inst - else_inst));
1467 } else {
1468 /* The IF instruction's JIP should point just past the ELSE */
1469 brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
1470 /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
1471 brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
1472 brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
1473 if (devinfo->gen >= 8) {
1474 /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
1475 * should point to ENDIF.
1476 */
1477 brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
1478 }
1479 }
1480 }
1481 }
1482
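/* Worked example (hypothetical instruction indices): with gen7's jump
 * scale br == 2 and a block laid out as
 *
 *    10: IF   ...   15: ELSE   ...   20: ENDIF
 *
 * the patching above yields IF.JIP = 2*(15-10+1) = 12 (just past the
 * ELSE), IF.UIP = 2*(20-10) = 20, and ELSE.JIP = 2*(20-15) = 10.
 */
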
1483 void
1484 brw_ELSE(struct brw_codegen *p)
1485 {
1486 const struct gen_device_info *devinfo = p->devinfo;
1487 brw_inst *insn;
1488
1489 insn = next_insn(p, BRW_OPCODE_ELSE);
1490
1491 if (devinfo->gen < 6) {
1492 brw_set_dest(p, insn, brw_ip_reg());
1493 brw_set_src0(p, insn, brw_ip_reg());
1494 brw_set_src1(p, insn, brw_imm_d(0x0));
1495 } else if (devinfo->gen == 6) {
1496 brw_set_dest(p, insn, brw_imm_w(0));
1497 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1498 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1499 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1500 } else if (devinfo->gen == 7) {
1501 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1502 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1503 brw_set_src1(p, insn, brw_imm_w(0));
1504 brw_inst_set_jip(devinfo, insn, 0);
1505 brw_inst_set_uip(devinfo, insn, 0);
1506 } else {
1507 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1508 brw_set_src0(p, insn, brw_imm_d(0));
1509 brw_inst_set_jip(devinfo, insn, 0);
1510 brw_inst_set_uip(devinfo, insn, 0);
1511 }
1512
1513 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1514 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1515 if (!p->single_program_flow && devinfo->gen < 6)
1516 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1517
1518 push_if_stack(p, insn);
1519 }
1520
1521 void
1522 brw_ENDIF(struct brw_codegen *p)
1523 {
1524 const struct gen_device_info *devinfo = p->devinfo;
1525 brw_inst *insn = NULL;
1526 brw_inst *else_inst = NULL;
1527 brw_inst *if_inst = NULL;
1528 brw_inst *tmp;
1529 bool emit_endif = true;
1530
1531 /* In single program flow mode, we can express IF and ELSE instructions
1532 * equivalently as ADD instructions that operate on IP. On platforms prior
1533 * to Gen6, flow control instructions cause an implied thread switch, so
1534 * this is a significant savings.
1535 *
1536 * However, on Gen6, writing to IP doesn't work in single program flow mode
1537 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1538 * not be updated by non-flow control instructions."). And on later
1539 * platforms, there is no significant benefit to converting control flow
1540 * instructions to conditional ADDs. So we only do this trick on Gen4 and
1541 * Gen5.
1542 */
1543 if (devinfo->gen < 6 && p->single_program_flow)
1544 emit_endif = false;
1545
1546 /*
1547     * A single next_insn() may change the base address of the instruction
1548     * store memory (p->store), so call it first, before deriving any
1549     * instruction pointer from a stored index.
1550 */
1551 if (emit_endif)
1552 insn = next_insn(p, BRW_OPCODE_ENDIF);
1553
1554 /* Pop the IF and (optional) ELSE instructions from the stack */
1555 p->if_depth_in_loop[p->loop_stack_depth]--;
1556 tmp = pop_if_stack(p);
1557 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1558 else_inst = tmp;
1559 tmp = pop_if_stack(p);
1560 }
1561 if_inst = tmp;
1562
1563 if (!emit_endif) {
1564 /* ENDIF is useless; don't bother emitting it. */
1565 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1566 return;
1567 }
1568
1569 if (devinfo->gen < 6) {
1570 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1571 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1572 brw_set_src1(p, insn, brw_imm_d(0x0));
1573 } else if (devinfo->gen == 6) {
1574 brw_set_dest(p, insn, brw_imm_w(0));
1575 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1576 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1577 } else if (devinfo->gen == 7) {
1578 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1579 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1580 brw_set_src1(p, insn, brw_imm_w(0));
1581 } else {
1582 brw_set_src0(p, insn, brw_imm_d(0));
1583 }
1584
1585 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1586 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1587 if (devinfo->gen < 6)
1588 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1589
1590 /* Also pop an item off the stack as part of the ENDIF instruction: */
1591 if (devinfo->gen < 6) {
1592 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1593 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1594 } else if (devinfo->gen == 6) {
1595 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1596 } else {
1597 brw_inst_set_jip(devinfo, insn, 2);
1598 }
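/* Note (illustrative): the jump values written here are provisional; for
 * ENDIF they are recomputed by brw_set_uip_jip() after generation. On
 * gen6/gen7 the branch scale is 2 (see brw_jump_scale()), so a count or
 * JIP of 2 advances exactly one 128-bit instruction, i.e. to the
 * instruction following the ENDIF.
 */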
1599 patch_IF_ELSE(p, if_inst, else_inst, insn);
1600 }
1601
1602 brw_inst *
1603 brw_BREAK(struct brw_codegen *p)
1604 {
1605 const struct gen_device_info *devinfo = p->devinfo;
1606 brw_inst *insn;
1607
1608 insn = next_insn(p, BRW_OPCODE_BREAK);
1609 if (devinfo->gen >= 8) {
1610 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1611 brw_set_src0(p, insn, brw_imm_d(0x0));
1612 } else if (devinfo->gen >= 6) {
1613 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1614 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1615 brw_set_src1(p, insn, brw_imm_d(0x0));
1616 } else {
1617 brw_set_dest(p, insn, brw_ip_reg());
1618 brw_set_src0(p, insn, brw_ip_reg());
1619 brw_set_src1(p, insn, brw_imm_d(0x0));
1620 brw_inst_set_gen4_pop_count(devinfo, insn,
1621 p->if_depth_in_loop[p->loop_stack_depth]);
1622 }
1623 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1624 brw_inst_set_exec_size(devinfo, insn,
1625 brw_inst_exec_size(devinfo, p->current));
1626
1627 return insn;
1628 }
1629
1630 brw_inst *
1631 brw_CONT(struct brw_codegen *p)
1632 {
1633 const struct gen_device_info *devinfo = p->devinfo;
1634 brw_inst *insn;
1635
1636 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1637 brw_set_dest(p, insn, brw_ip_reg());
1638 if (devinfo->gen >= 8) {
1639 brw_set_src0(p, insn, brw_imm_d(0x0));
1640 } else {
1641 brw_set_src0(p, insn, brw_ip_reg());
1642 brw_set_src1(p, insn, brw_imm_d(0x0));
1643 }
1644
1645 if (devinfo->gen < 6) {
1646 brw_inst_set_gen4_pop_count(devinfo, insn,
1647 p->if_depth_in_loop[p->loop_stack_depth]);
1648 }
1649 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1650 brw_inst_set_exec_size(devinfo, insn,
1651 brw_inst_exec_size(devinfo, p->current));
1652 return insn;
1653 }
1654
1655 brw_inst *
1656 gen6_HALT(struct brw_codegen *p)
1657 {
1658 const struct gen_device_info *devinfo = p->devinfo;
1659 brw_inst *insn;
1660
1661 insn = next_insn(p, BRW_OPCODE_HALT);
1662 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1663 if (devinfo->gen >= 8) {
1664 brw_set_src0(p, insn, brw_imm_d(0x0));
1665 } else {
1666 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1667 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1668 }
1669
1670 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1671 brw_inst_set_exec_size(devinfo, insn,
1672 brw_inst_exec_size(devinfo, p->current));
1673 return insn;
1674 }
1675
1676 /* DO/WHILE loop:
1677 *
1678 * The DO/WHILE is just an unterminated loop -- break or continue are
1679 * used for control within the loop. There are a few ways it can be
1680 * implemented.
1681 *
1682 * For uniform control flow, the WHILE is just a backwards jump (an
1683 * "ADD ip, ip, jip"), so no DO instruction is needed.
1684 *
1685 * For non-uniform control flow pre-gen6, there's a DO instruction to
1686 * push the mask, and a WHILE to jump back, and BREAK to get out and
1687 * pop the mask.
1688 *
1689 * For gen6, there's no more mask stack, so no need for DO. WHILE
1690 * just points back to the first instruction of the loop.
1691 */
1692 brw_inst *
1693 brw_DO(struct brw_codegen *p, unsigned execute_size)
1694 {
1695 const struct gen_device_info *devinfo = p->devinfo;
1696
1697 if (devinfo->gen >= 6 || p->single_program_flow) {
1698 push_loop_stack(p, &p->store[p->nr_insn]);
1699 return &p->store[p->nr_insn];
1700 } else {
1701 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1702
1703 push_loop_stack(p, insn);
1704
1705 /* Override the defaults for this instruction:
1706 */
1707 brw_set_dest(p, insn, brw_null_reg());
1708 brw_set_src0(p, insn, brw_null_reg());
1709 brw_set_src1(p, insn, brw_null_reg());
1710
1711 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1712 brw_inst_set_exec_size(devinfo, insn, execute_size);
1713 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1714
1715 return insn;
1716 }
1717 }
1718
1719 /**
1720 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1721 * instruction here.
1722 *
1723 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1724 * nesting, since it can always just point to the end of the block/current loop.
1725 */
1726 static void
1727 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1728 {
1729 const struct gen_device_info *devinfo = p->devinfo;
1730 brw_inst *do_inst = get_inner_do_insn(p);
1731 brw_inst *inst;
1732 unsigned br = brw_jump_scale(devinfo);
1733
1734 assert(devinfo->gen < 6);
1735
1736 for (inst = while_inst - 1; inst != do_inst; inst--) {
1737 /* A non-zero jump count means this instruction has already been
1738 * patched, because it belongs to a loop nested inside the one we're
1739 * patching.
1740 */
1741 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1742 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1743 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1744 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1745 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1746 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1747 }
1748 }
1749 }
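/* A worked example of the patching above (illustrative, assuming
 * brw_jump_scale() is 2 on gen5): for a BREAK 5 instructions before the
 * WHILE, the jump count becomes 2 * (5 + 1) = 12, landing one instruction
 * past the WHILE; a CONTINUE at the same distance gets 2 * 5 = 10 and
 * lands on the WHILE itself, so the loop condition is re-evaluated.
 */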
1750
1751 brw_inst *
1752 brw_WHILE(struct brw_codegen *p)
1753 {
1754 const struct gen_device_info *devinfo = p->devinfo;
1755 brw_inst *insn, *do_insn;
1756 unsigned br = brw_jump_scale(devinfo);
1757
1758 if (devinfo->gen >= 6) {
1759 insn = next_insn(p, BRW_OPCODE_WHILE);
1760 do_insn = get_inner_do_insn(p);
1761
1762 if (devinfo->gen >= 8) {
1763 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1764 brw_set_src0(p, insn, brw_imm_d(0));
1765 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1766 } else if (devinfo->gen == 7) {
1767 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1768 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1769 brw_set_src1(p, insn, brw_imm_w(0));
1770 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1771 } else {
1772 brw_set_dest(p, insn, brw_imm_w(0));
1773 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1774 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1775 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1776 }
1777
1778 brw_inst_set_exec_size(devinfo, insn,
1779 brw_inst_exec_size(devinfo, p->current));
1780
1781 } else {
1782 if (p->single_program_flow) {
1783 insn = next_insn(p, BRW_OPCODE_ADD);
1784 do_insn = get_inner_do_insn(p);
1785
1786 brw_set_dest(p, insn, brw_ip_reg());
1787 brw_set_src0(p, insn, brw_ip_reg());
1788 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1789 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1790 } else {
1791 insn = next_insn(p, BRW_OPCODE_WHILE);
1792 do_insn = get_inner_do_insn(p);
1793
1794 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1795
1796 brw_set_dest(p, insn, brw_ip_reg());
1797 brw_set_src0(p, insn, brw_ip_reg());
1798 brw_set_src1(p, insn, brw_imm_d(0));
1799
1800 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1801 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1802 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1803
1804 brw_patch_break_cont(p, insn);
1805 }
1806 }
1807 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1808
1809 p->loop_stack_depth--;
1810
1811 return insn;
1812 }
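/* A minimal usage sketch of the loop helpers (illustrative):
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *    ...loop body, possibly containing brw_BREAK()/brw_CONT()...
 *    brw_WHILE(p);
 *
 * brw_WHILE() pops the entry pushed by brw_DO() and, pre-gen6, patches any
 * not-yet-patched BREAK/CONT jump counts via brw_patch_break_cont(); on
 * gen6+ the BREAK/CONT offsets are fixed up later by brw_set_uip_jip().
 */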
1813
1814 /* FORWARD JUMPS:
1815 */
1816 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1817 {
1818 const struct gen_device_info *devinfo = p->devinfo;
1819 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1820 unsigned jmpi = 1;
1821
1822 if (devinfo->gen >= 5)
1823 jmpi = 2;
1824
1825 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1826 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1827
1828 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1829 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1830 }
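/* Usage sketch (hypothetical indices): after emitting a JMPI at index i
 * with a zero jump count, the caller emits the instructions to be skipped
 * and then calls brw_land_fwd_jump(p, i) so the jump lands on the next
 * instruction to be emitted. E.g. with 3 instructions emitted after the
 * JMPI on gen5+ (jmpi == 2), the count is 2 * (i + 4 - i - 1) = 6.
 */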
1831
1832 /* To integrate with the above, it makes sense that the comparison
1833 * instruction should populate the flag register. It might be simpler
1834 * just to use the flag reg for most WM tasks?
1835 */
1836 void brw_CMP(struct brw_codegen *p,
1837 struct brw_reg dest,
1838 unsigned conditional,
1839 struct brw_reg src0,
1840 struct brw_reg src1)
1841 {
1842 const struct gen_device_info *devinfo = p->devinfo;
1843 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1844
1845 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1846 brw_set_dest(p, insn, dest);
1847 brw_set_src0(p, insn, src0);
1848 brw_set_src1(p, insn, src1);
1849
1850 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1851 * page says:
1852 * "Any CMP instruction with a null destination must use a {switch}."
1853 *
1854 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1855 * mentioned on their work-arounds pages.
1856 */
1857 if (devinfo->gen == 7) {
1858 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1859 dest.nr == BRW_ARF_NULL) {
1860 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1861 }
1862 }
1863 }
1864
1865 /***********************************************************************
1866 * Helpers for the various SEND message types:
1867 */
1868
1869 /** Extended math function, float[8].
1870 */
1871 void gen4_math(struct brw_codegen *p,
1872 struct brw_reg dest,
1873 unsigned function,
1874 unsigned msg_reg_nr,
1875 struct brw_reg src,
1876 unsigned precision)
1877 {
1878 const struct gen_device_info *devinfo = p->devinfo;
1879 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1880 unsigned data_type;
1881 if (has_scalar_region(src)) {
1882 data_type = BRW_MATH_DATA_SCALAR;
1883 } else {
1884 data_type = BRW_MATH_DATA_VECTOR;
1885 }
1886
1887 assert(devinfo->gen < 6);
1888
1889 /* Example code doesn't set predicate_control for send
1890 * instructions.
1891 */
1892 brw_inst_set_pred_control(devinfo, insn, 0);
1893 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1894
1895 brw_set_dest(p, insn, dest);
1896 brw_set_src0(p, insn, src);
1897 brw_set_math_message(p,
1898 insn,
1899 function,
1900 src.type == BRW_REGISTER_TYPE_D,
1901 precision,
1902 data_type);
1903 }
1904
1905 void gen6_math(struct brw_codegen *p,
1906 struct brw_reg dest,
1907 unsigned function,
1908 struct brw_reg src0,
1909 struct brw_reg src1)
1910 {
1911 const struct gen_device_info *devinfo = p->devinfo;
1912 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1913
1914 assert(devinfo->gen >= 6);
1915
1916 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1917 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1918
1919 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1920 if (devinfo->gen == 6) {
1921 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1922 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1923 }
1924
1925 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1926 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1927 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1928 assert(src0.type != BRW_REGISTER_TYPE_F);
1929 assert(src1.type != BRW_REGISTER_TYPE_F);
1930 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1931 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1932 } else {
1933 assert(src0.type == BRW_REGISTER_TYPE_F);
1934 assert(src1.type == BRW_REGISTER_TYPE_F);
1935 }
1936
1937 /* Source modifiers are ignored for extended math instructions on Gen6. */
1938 if (devinfo->gen == 6) {
1939 assert(!src0.negate);
1940 assert(!src0.abs);
1941 assert(!src1.negate);
1942 assert(!src1.abs);
1943 }
1944
1945 brw_inst_set_math_function(devinfo, insn, function);
1946
1947 brw_set_dest(p, insn, dest);
1948 brw_set_src0(p, insn, src0);
1949 brw_set_src1(p, insn, src1);
1950 }
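/* A minimal usage sketch (illustrative, not taken from a caller in this
 * file): single-source functions still pass a src1 operand, conventionally
 * the null register retyped to float so the type asserts above hold:
 *
 *    gen6_math(p, dst, BRW_MATH_FUNCTION_SQRT, src,
 *              retype(brw_null_reg(), BRW_REGISTER_TYPE_F));
 */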
1951
1952 /**
1953 * Return the right surface index to access the thread scratch space using
1954 * stateless dataport messages.
1955 */
1956 unsigned
1957 brw_scratch_surface_idx(const struct brw_codegen *p)
1958 {
1959 /* The scratch space is thread-local so IA coherency is unnecessary. */
1960 if (p->devinfo->gen >= 8)
1961 return GEN8_BTI_STATELESS_NON_COHERENT;
1962 else
1963 return BRW_BTI_STATELESS;
1964 }
1965
1966 /**
1967 * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1968 * using a constant offset per channel.
1969 *
1970 * The offset must be aligned to oword size (16 bytes). Used for
1971 * register spilling.
1972 */
1973 void brw_oword_block_write_scratch(struct brw_codegen *p,
1974 struct brw_reg mrf,
1975 int num_regs,
1976 unsigned offset)
1977 {
1978 const struct gen_device_info *devinfo = p->devinfo;
1979 const unsigned target_cache =
1980 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1981 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1982 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
1983 uint32_t msg_type;
1984
1985 if (devinfo->gen >= 6)
1986 offset /= 16;
1987
1988 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1989
1990 const unsigned mlen = 1 + num_regs;
1991
1992 /* Set up the message header. This is g0, with g0.2 filled with
1993 * the offset. We don't want to leave our offset around in g0 or
1994 * it'll screw up texture samples, so set it up inside the message
1995 * reg.
1996 */
1997 {
1998 brw_push_insn_state(p);
1999 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2000 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2001 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2002
2003 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2004
2005 /* set message header global offset field (reg 0, element 2) */
2006 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2007 brw_MOV(p,
2008 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2009 mrf.nr,
2010 2), BRW_REGISTER_TYPE_UD),
2011 brw_imm_ud(offset));
2012
2013 brw_pop_insn_state(p);
2014 }
2015
2016 {
2017 struct brw_reg dest;
2018 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2019 int send_commit_msg;
2020 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
2021 BRW_REGISTER_TYPE_UW);
2022
2023 brw_inst_set_compression(devinfo, insn, false);
2024
2025 if (brw_inst_exec_size(devinfo, insn) >= 16)
2026 src_header = vec16(src_header);
2027
2028 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2029 if (devinfo->gen < 6)
2030 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2031
2032 /* Until gen6, writes followed by reads from the same location
2033 * are not guaranteed to be ordered unless write_commit is set.
2034 * If set, then a no-op write is issued to the destination
2035 * register to set a dependency, and a read from the destination
2036 * can be used to ensure the ordering.
2037 *
2038 * For gen6, only writes between different threads need ordering
2039 * protection. Our use of DP writes is all about register
2040 * spilling within a thread.
2041 */
2042 if (devinfo->gen >= 6) {
2043 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2044 send_commit_msg = 0;
2045 } else {
2046 dest = src_header;
2047 send_commit_msg = 1;
2048 }
2049
2050 brw_set_dest(p, insn, dest);
2051 if (devinfo->gen >= 6) {
2052 brw_set_src0(p, insn, mrf);
2053 } else {
2054 brw_set_src0(p, insn, brw_null_reg());
2055 }
2056
2057 if (devinfo->gen >= 6)
2058 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2059 else
2060 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2061
2062 brw_set_dp_write_message(p,
2063 insn,
2064 brw_scratch_surface_idx(p),
2065 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2066 msg_type,
2067 target_cache,
2068 mlen,
2069 true, /* header_present */
2070 0, /* not a render target */
2071 send_commit_msg, /* response_length */
2072 0, /* eot */
2073 send_commit_msg);
2074 }
2075 }
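/* Usage sketch (hypothetical values): spilling two GRFs starting 64 bytes
 * into the scratch buffer sends a 3-register message (1 header + 2 data):
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(1), 2, 64);
 *
 * On gen6+ the 64-byte offset is converted to 4 owords before being
 * written into element 2 of the message header.
 */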
2076
2077
2078 /**
2079 * Read a block of owords (half a GRF each) from the scratch buffer
2080 * using a constant index per channel.
2081 *
2082 * Offset must be aligned to oword size (16 bytes). Used for register
2083 * spilling.
2084 */
2085 void
2086 brw_oword_block_read_scratch(struct brw_codegen *p,
2087 struct brw_reg dest,
2088 struct brw_reg mrf,
2089 int num_regs,
2090 unsigned offset)
2091 {
2092 const struct gen_device_info *devinfo = p->devinfo;
2093
2094 if (devinfo->gen >= 6)
2095 offset /= 16;
2096
2097 if (p->devinfo->gen >= 7) {
2098 /* On gen 7 and above, we no longer have message registers and we can
2099 * send from any register we want. By using the destination register
2100 * for the message, we guarantee that the implied message write won't
2101 * accidentally overwrite anything. This has been a problem because
2102 * the MRF registers and source for the final FB write are both fixed
2103 * and may overlap.
2104 */
2105 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2106 } else {
2107 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2108 }
2109 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2110
2111 const unsigned rlen = num_regs;
2112 const unsigned target_cache =
2113 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2114 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2115 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2116
2117 {
2118 brw_push_insn_state(p);
2119 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2120 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2121 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2122
2123 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2124
2125 /* set message header global offset field (reg 0, element 2) */
2126 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2127 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2128
2129 brw_pop_insn_state(p);
2130 }
2131
2132 {
2133 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2134
2135 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2136 brw_inst_set_compression(devinfo, insn, false);
2137
2138 brw_set_dest(p, insn, dest); /* dest was retyped to UW above */
2139 if (devinfo->gen >= 6) {
2140 brw_set_src0(p, insn, mrf);
2141 } else {
2142 brw_set_src0(p, insn, brw_null_reg());
2143 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2144 }
2145
2146 brw_set_dp_read_message(p,
2147 insn,
2148 brw_scratch_surface_idx(p),
2149 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2150 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
2151 target_cache,
2152 1, /* msg_length */
2153 true, /* header_present */
2154 rlen);
2155 }
2156 }
2157
2158 void
2159 gen7_block_read_scratch(struct brw_codegen *p,
2160 struct brw_reg dest,
2161 int num_regs,
2162 unsigned offset)
2163 {
2164 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2165 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2166
2167 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2168
2169 /* The HW requires that the header is present; this is to get the g0.5
2170 * scratch offset.
2171 */
2172 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2173
2174 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2175 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2176 * is 32 bytes, which happens to be the size of a register.
2177 */
2178 offset /= REG_SIZE;
2179 assert(offset < (1 << 12));
2180
2181 gen7_set_dp_scratch_message(p, insn,
2182 false, /* scratch read */
2183 false, /* OWords */
2184 false, /* invalidate after read */
2185 num_regs,
2186 offset,
2187 1, /* mlen: just g0 */
2188 num_regs, /* rlen */
2189 true); /* header present */
2190 }
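/* A worked example of the offset encoding above (illustrative): reading
 * two registers spilled at byte offset 96 gives offset = 96 / REG_SIZE = 3
 * HWords, which fits easily in the 12-bit field checked by the assert.
 */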
2191
2192 /**
2193 * Read float[4] vectors from the data port constant cache.
2194 * Location (in buffer) should be a multiple of 16.
2195 * Used for fetching shader constants.
2196 */
2197 void brw_oword_block_read(struct brw_codegen *p,
2198 struct brw_reg dest,
2199 struct brw_reg mrf,
2200 uint32_t offset,
2201 uint32_t bind_table_index)
2202 {
2203 const struct gen_device_info *devinfo = p->devinfo;
2204 const unsigned target_cache =
2205 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2206 BRW_DATAPORT_READ_TARGET_DATA_CACHE);
2207 const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
2208
2209 /* On newer hardware, offset is in units of owords. */
2210 if (devinfo->gen >= 6)
2211 offset /= 16;
2212
2213 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2214
2215 brw_push_insn_state(p);
2216 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2217 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2218 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2219
2220 brw_push_insn_state(p);
2221 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2222 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2223
2224 /* set message header global offset field (reg 0, element 2) */
2225 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2226 brw_MOV(p,
2227 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2228 mrf.nr,
2229 2), BRW_REGISTER_TYPE_UD),
2230 brw_imm_ud(offset));
2231 brw_pop_insn_state(p);
2232
2233 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2234
2235 /* cast dest to a uword[8] vector */
2236 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2237
2238 brw_set_dest(p, insn, dest);
2239 if (devinfo->gen >= 6) {
2240 brw_set_src0(p, insn, mrf);
2241 } else {
2242 brw_set_src0(p, insn, brw_null_reg());
2243 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2244 }
2245
2246 brw_set_dp_read_message(p, insn, bind_table_index,
2247 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2248 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2249 target_cache,
2250 1, /* msg_length */
2251 true, /* header_present */
2252 DIV_ROUND_UP(exec_size, 8)); /* response_length */
2253
2254 brw_pop_insn_state(p);
2255 }
2256
2257
2258 void brw_fb_WRITE(struct brw_codegen *p,
2259 struct brw_reg payload,
2260 struct brw_reg implied_header,
2261 unsigned msg_control,
2262 unsigned binding_table_index,
2263 unsigned msg_length,
2264 unsigned response_length,
2265 bool eot,
2266 bool last_render_target,
2267 bool header_present)
2268 {
2269 const struct gen_device_info *devinfo = p->devinfo;
2270 const unsigned target_cache =
2271 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2272 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2273 brw_inst *insn;
2274 unsigned msg_type;
2275 struct brw_reg dest, src0;
2276
2277 if (brw_inst_exec_size(devinfo, p->current) >= BRW_EXECUTE_16)
2278 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2279 else
2280 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2281
2282 if (devinfo->gen >= 6) {
2283 insn = next_insn(p, BRW_OPCODE_SENDC);
2284 } else {
2285 insn = next_insn(p, BRW_OPCODE_SEND);
2286 }
2287 brw_inst_set_compression(devinfo, insn, false);
2288
2289 if (devinfo->gen >= 6) {
2290 /* headerless version, just submit color payload */
2291 src0 = payload;
2292
2293 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2294 } else {
2295 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2296 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2297 src0 = implied_header;
2298
2299 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2300 }
2301
2302 brw_set_dest(p, insn, dest);
2303 brw_set_src0(p, insn, src0);
2304 brw_set_dp_write_message(p,
2305 insn,
2306 binding_table_index,
2307 msg_control,
2308 msg_type,
2309 target_cache,
2310 msg_length,
2311 header_present,
2312 last_render_target,
2313 response_length,
2314 eot,
2315 0 /* send_commit_msg */);
2316 }
2317
2318 brw_inst *
2319 gen9_fb_READ(struct brw_codegen *p,
2320 struct brw_reg dst,
2321 struct brw_reg payload,
2322 unsigned binding_table_index,
2323 unsigned msg_length,
2324 unsigned response_length,
2325 bool per_sample)
2326 {
2327 const struct gen_device_info *devinfo = p->devinfo;
2328 assert(devinfo->gen >= 9);
2329 const unsigned msg_subtype =
2330 brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16 ? 0 : 1;
2331 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2332
2333 brw_set_dest(p, insn, dst);
2334 brw_set_src0(p, insn, payload);
2335 brw_set_dp_read_message(p, insn, binding_table_index,
2336 per_sample << 5 | msg_subtype,
2337 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2338 GEN6_SFID_DATAPORT_RENDER_CACHE,
2339 msg_length, true /* header_present */,
2340 response_length);
2341 brw_inst_set_rt_slot_group(devinfo, insn,
2342 brw_inst_qtr_control(devinfo, p->current) / 2);
2343
2344 return insn;
2345 }
2346
2347 /**
2348 * Texture sample instruction.
2349 * Note: the msg_type plus msg_length values determine exactly what kind
2350 * of sampling operation is performed. See volume 4, page 161 of docs.
2351 */
2352 void brw_SAMPLE(struct brw_codegen *p,
2353 struct brw_reg dest,
2354 unsigned msg_reg_nr,
2355 struct brw_reg src0,
2356 unsigned binding_table_index,
2357 unsigned sampler,
2358 unsigned msg_type,
2359 unsigned response_length,
2360 unsigned msg_length,
2361 unsigned header_present,
2362 unsigned simd_mode,
2363 unsigned return_format)
2364 {
2365 const struct gen_device_info *devinfo = p->devinfo;
2366 brw_inst *insn;
2367
2368 if (msg_reg_nr != -1)
2369 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2370
2371 insn = next_insn(p, BRW_OPCODE_SEND);
2372 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2373
2374 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2375 *
2376 * "Instruction compression is not allowed for this instruction (that
2377 * is, send). The hardware behavior is undefined if this instruction is
2378 * set as compressed. However, compress control can be set to "SecHalf"
2379 * to affect the EMask generation."
2380 *
2381 * No similar wording is found in later PRMs, but there are examples
2382 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2383 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2384 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2385 */
2386 brw_inst_set_compression(devinfo, insn, false);
2387
2388 if (devinfo->gen < 6)
2389 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2390
2391 brw_set_dest(p, insn, dest);
2392 brw_set_src0(p, insn, src0);
2393 brw_set_sampler_message(p, insn,
2394 binding_table_index,
2395 sampler,
2396 msg_type,
2397 response_length,
2398 msg_length,
2399 header_present,
2400 simd_mode,
2401 return_format);
2402 }
2403
2404 /* Adjust the message header's sampler state pointer to
2405 * select the correct group of 16 samplers.
2406 */
2407 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2408 struct brw_reg header,
2409 struct brw_reg sampler_index)
2410 {
2411 /* The "Sampler Index" field can only store values between 0 and 15.
2412 * However, we can add an offset to the "Sampler State Pointer"
2413 * field, effectively selecting a different set of 16 samplers.
2414 *
2415 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2416 * offset, and each sampler state is only 16 bytes, so we can't
2417 * exclusively use the offset - we have to use both.
2418 */
2419
2420 const struct gen_device_info *devinfo = p->devinfo;
2421
2422 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2423 const int sampler_state_size = 16; /* 16 bytes */
2424 uint32_t sampler = sampler_index.ud;
2425
2426 if (sampler >= 16) {
2427 assert(devinfo->is_haswell || devinfo->gen >= 8);
2428 brw_ADD(p,
2429 get_element_ud(header, 3),
2430 get_element_ud(brw_vec8_grf(0, 0), 3),
2431 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2432 }
2433 } else {
2434 /* Non-const sampler array indexing case */
2435 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2436 return;
2437 }
2438
2439 struct brw_reg temp = get_element_ud(header, 3);
2440
2441 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2442 brw_SHL(p, temp, temp, brw_imm_ud(4));
2443 brw_ADD(p,
2444 get_element_ud(header, 3),
2445 get_element_ud(brw_vec8_grf(0, 0), 3),
2446 temp);
2447 }
2448 }
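/* A worked example (illustrative): for sampler index 20, the immediate
 * path adds 16 * (20 / 16) * 16 = 256 bytes to the sampler state pointer,
 * and the indirect path computes the same value: (20 & 0xf0) << 4 = 0x100.
 * Sampler 20 is then presumably addressed as index 20 % 16 == 4 within
 * the newly selected group of 16 samplers.
 */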
2449
2450 /* All these variables are pretty confusing - we might be better off
2451 * using bitmasks and macros for this, in the old style. Or perhaps
2452 * just having the caller instantiate the fields in dword3 itself.
2453 */
2454 void brw_urb_WRITE(struct brw_codegen *p,
2455 struct brw_reg dest,
2456 unsigned msg_reg_nr,
2457 struct brw_reg src0,
2458 enum brw_urb_write_flags flags,
2459 unsigned msg_length,
2460 unsigned response_length,
2461 unsigned offset,
2462 unsigned swizzle)
2463 {
2464 const struct gen_device_info *devinfo = p->devinfo;
2465 brw_inst *insn;
2466
2467 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2468
2469 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2470 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2471 brw_push_insn_state(p);
2472 brw_set_default_access_mode(p, BRW_ALIGN_1);
2473 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2474 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2475 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2476 BRW_REGISTER_TYPE_UD),
2477 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2478 brw_imm_ud(0xff00));
2479 brw_pop_insn_state(p);
2480 }
2481
2482 insn = next_insn(p, BRW_OPCODE_SEND);
2483
2484 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2485
2486 brw_set_dest(p, insn, dest);
2487 brw_set_src0(p, insn, src0);
2488 brw_set_src1(p, insn, brw_imm_d(0));
2489
2490 if (devinfo->gen < 6)
2491 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2492
2493 brw_set_urb_message(p,
2494 insn,
2495 flags,
2496 msg_length,
2497 response_length,
2498 offset,
2499 swizzle);
2500 }
2501
2502 struct brw_inst *
2503 brw_send_indirect_message(struct brw_codegen *p,
2504 unsigned sfid,
2505 struct brw_reg dst,
2506 struct brw_reg payload,
2507 struct brw_reg desc)
2508 {
2509 const struct gen_device_info *devinfo = p->devinfo;
2510 struct brw_inst *send;
2511 int setup;
2512
2513 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2514
2515 assert(desc.type == BRW_REGISTER_TYPE_UD);
2516
2517 /* We hold on to the setup instruction (the SEND in the direct case, the OR
2518 * in the indirect case) by its index in the instruction store. The
2519 * pointer returned by next_insn() may become invalid if emitting the SEND
2520 * in the indirect case reallocs the store.
2521 */
2522
2523 if (desc.file == BRW_IMMEDIATE_VALUE) {
2524 setup = p->nr_insn;
2525 send = next_insn(p, BRW_OPCODE_SEND);
2526 brw_set_src1(p, send, desc);
2527
2528 } else {
2529 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2530
2531 brw_push_insn_state(p);
2532 brw_set_default_access_mode(p, BRW_ALIGN_1);
2533 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2534 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2535 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2536
2537 /* Load the indirect descriptor to an address register using OR so the
2538 * caller can specify additional descriptor bits with the usual
2539 * brw_set_*_message() helper functions.
2540 */
2541 setup = p->nr_insn;
2542 brw_OR(p, addr, desc, brw_imm_ud(0));
2543
2544 brw_pop_insn_state(p);
2545
2546 send = next_insn(p, BRW_OPCODE_SEND);
2547 brw_set_src1(p, send, addr);
2548 }
2549
2550 if (dst.width < BRW_EXECUTE_8)
2551 brw_inst_set_exec_size(devinfo, send, dst.width);
2552
2553 brw_set_dest(p, send, dst);
2554 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2555 brw_inst_set_sfid(devinfo, send, sfid);
2556
2557 return &p->store[setup];
2558 }
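/* Note on usage (a sketch of the contract, not new behavior): the caller
 * applies descriptor helpers to the returned instruction, which is the
 * SEND itself in the direct case and the address-register OR in the
 * indirect case; in the latter, the extra bits land in the OR's immediate
 * source and are folded into the descriptor at run time. See
 * brw_send_indirect_surface_message() below for an example that sets
 * mlen/rlen this way.
 */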
2559
2560 static struct brw_inst *
2561 brw_send_indirect_surface_message(struct brw_codegen *p,
2562 unsigned sfid,
2563 struct brw_reg dst,
2564 struct brw_reg payload,
2565 struct brw_reg surface,
2566 unsigned message_len,
2567 unsigned response_len,
2568 bool header_present)
2569 {
2570 const struct gen_device_info *devinfo = p->devinfo;
2571 struct brw_inst *insn;
2572
2573 if (surface.file != BRW_IMMEDIATE_VALUE) {
2574 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2575
2576 brw_push_insn_state(p);
2577 brw_set_default_access_mode(p, BRW_ALIGN_1);
2578 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2579 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2580 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2581
2582 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2583 * some surface array is accessed out of bounds.
2584 */
2585 insn = brw_AND(p, addr,
2586 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2587 BRW_GET_SWZ(surface.swizzle, 0)),
2588 brw_imm_ud(0xff));
2589
2590 brw_pop_insn_state(p);
2591
2592 surface = addr;
2593 }
2594
2595 insn = brw_send_indirect_message(p, sfid, dst, payload, surface);
2596 brw_inst_set_mlen(devinfo, insn, message_len);
2597 brw_inst_set_rlen(devinfo, insn, response_len);
2598 brw_inst_set_header_present(devinfo, insn, header_present);
2599
2600 return insn;
2601 }
2602
2603 static bool
2604 while_jumps_before_offset(const struct gen_device_info *devinfo,
2605 brw_inst *insn, int while_offset, int start_offset)
2606 {
2607 int scale = 16 / brw_jump_scale(devinfo);
2608 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2609 : brw_inst_jip(devinfo, insn);
2610 assert(jip < 0);
2611 return while_offset + jip * scale <= start_offset;
2612 }
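/* A worked example (illustrative): on gen7, brw_jump_scale() is 2, so
 * scale is 16 / 2 = 8 bytes per JIP unit. A WHILE at byte offset 64 with
 * jip == -4 targets 64 + (-4 * 8) = 32, so it "jumps before" any start
 * offset of 32 or more.
 */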
2613
2614
2615 static int
2616 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2617 {
2618 int offset;
2619 void *store = p->store;
2620 const struct gen_device_info *devinfo = p->devinfo;
2621
2622 int depth = 0;
2623
2624 for (offset = next_offset(devinfo, store, start_offset);
2625 offset < p->next_insn_offset;
2626 offset = next_offset(devinfo, store, offset)) {
2627 brw_inst *insn = store + offset;
2628
2629 switch (brw_inst_opcode(devinfo, insn)) {
2630 case BRW_OPCODE_IF:
2631 depth++;
2632 break;
2633 case BRW_OPCODE_ENDIF:
2634 if (depth == 0)
2635 return offset;
2636 depth--;
2637 break;
2638 case BRW_OPCODE_WHILE:
2639 /* If the while doesn't jump before our instruction, it's the end
2640 * of a sibling do...while loop. Ignore it.
2641 */
2642 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2643 continue;
2644 /* fallthrough */
2645 case BRW_OPCODE_ELSE:
2646 case BRW_OPCODE_HALT:
2647 if (depth == 0)
2648 return offset;
2649 }
2650 }
2651
2652 return 0;
2653 }
2654
2655 /* There is no DO instruction on gen6 and later, so to find the end of
2656 * the loop we have to check whether a WHILE jumps back before our start
2657 * instruction.
2658 */
2659 static int
2660 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2661 {
2662 const struct gen_device_info *devinfo = p->devinfo;
2663 int offset;
2664 void *store = p->store;
2665
2666 assert(devinfo->gen >= 6);
2667
2668 /* Always start after the instruction (such as a WHILE) we're trying to fix
2669 * up.
2670 */
2671 for (offset = next_offset(devinfo, store, start_offset);
2672 offset < p->next_insn_offset;
2673 offset = next_offset(devinfo, store, offset)) {
2674 brw_inst *insn = store + offset;
2675
2676 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2677 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2678 return offset;
2679 }
2680 }
2681 assert(!"not reached");
2682 return start_offset;
2683 }
2684
2685 /* After program generation, go back and update the UIP and JIP of
2686 * BREAK, CONT, and HALT instructions to their correct locations.
2687 */
2688 void
2689 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2690 {
2691 const struct gen_device_info *devinfo = p->devinfo;
2692 int offset;
2693 int br = brw_jump_scale(devinfo);
2694 int scale = 16 / br;
2695 void *store = p->store;
2696
2697 if (devinfo->gen < 6)
2698 return;
2699
2700 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2701 brw_inst *insn = store + offset;
2702 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2703
2704 int block_end_offset = brw_find_next_block_end(p, offset);
2705 switch (brw_inst_opcode(devinfo, insn)) {
2706 case BRW_OPCODE_BREAK:
2707 assert(block_end_offset != 0);
2708 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2709 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2710 brw_inst_set_uip(devinfo, insn,
2711 (brw_find_loop_end(p, offset) - offset +
2712 (devinfo->gen == 6 ? 16 : 0)) / scale);
2713 break;
2714 case BRW_OPCODE_CONTINUE:
2715 assert(block_end_offset != 0);
2716 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2717 brw_inst_set_uip(devinfo, insn,
2718 (brw_find_loop_end(p, offset) - offset) / scale);
2719
2720 assert(brw_inst_uip(devinfo, insn) != 0);
2721 assert(brw_inst_jip(devinfo, insn) != 0);
2722 break;
2723
2724 case BRW_OPCODE_ENDIF: {
2725 int32_t jump = (block_end_offset == 0) ?
2726 1 * br : (block_end_offset - offset) / scale;
2727 if (devinfo->gen >= 7)
2728 brw_inst_set_jip(devinfo, insn, jump);
2729 else
2730 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2731 break;
2732 }
2733
2734 case BRW_OPCODE_HALT:
2735 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2736 *
2737 * "In case of the halt instruction not inside any conditional
2738 * code block, the value of <JIP> and <UIP> should be the
2739 * same. In case of the halt instruction inside conditional code
2740 * block, the <UIP> should be the end of the program, and the
2741 * <JIP> should be end of the most inner conditional code block."
2742 *
2743 * The uip will have already been set by whoever set up the
2744 * instruction.
2745 */
2746 if (block_end_offset == 0) {
2747 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2748 } else {
2749 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2750 }
2751 assert(brw_inst_uip(devinfo, insn) != 0);
2752 assert(brw_inst_jip(devinfo, insn) != 0);
2753 break;
2754 }
2755 }
2756 }
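/* A worked example of the fixup above (illustrative): on gen7 (br == 2,
 * scale == 8), a BREAK at offset 48 whose block ends at offset 80 gets
 * JIP = (80 - 48) / 8 = 4, i.e. four 64-bit units, two instructions
 * forward.
 */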
2757
2758 void brw_ff_sync(struct brw_codegen *p,
2759 struct brw_reg dest,
2760 unsigned msg_reg_nr,
2761 struct brw_reg src0,
2762 bool allocate,
2763 unsigned response_length,
2764 bool eot)
2765 {
2766 const struct gen_device_info *devinfo = p->devinfo;
2767 brw_inst *insn;
2768
2769 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2770
2771 insn = next_insn(p, BRW_OPCODE_SEND);
2772 brw_set_dest(p, insn, dest);
2773 brw_set_src0(p, insn, src0);
2774 brw_set_src1(p, insn, brw_imm_d(0));
2775
2776 if (devinfo->gen < 6)
2777 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2778
2779 brw_set_ff_sync_message(p,
2780 insn,
2781 allocate,
2782 response_length,
2783 eot);
2784 }
2785
2786 /**
2787 * Emit the SEND instruction necessary to generate stream output data on Gen6
2788 * (for transform feedback).
2789 *
2790 * If send_commit_msg is true, this is the last piece of stream output data
2791 * from this thread, so send the data as a committed write. According to the
2792 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2793 *
2794 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2795 * writes are complete by sending the final write as a committed write."
2796 */
2797 void
2798 brw_svb_write(struct brw_codegen *p,
2799 struct brw_reg dest,
2800 unsigned msg_reg_nr,
2801 struct brw_reg src0,
2802 unsigned binding_table_index,
2803 bool send_commit_msg)
2804 {
2805 const struct gen_device_info *devinfo = p->devinfo;
2806 const unsigned target_cache =
2807 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2808 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2809 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2810 brw_inst *insn;
2811
2812 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2813
2814 insn = next_insn(p, BRW_OPCODE_SEND);
2815 brw_set_dest(p, insn, dest);
2816 brw_set_src0(p, insn, src0);
2817 brw_set_src1(p, insn, brw_imm_d(0));
2818 brw_set_dp_write_message(p, insn,
2819 binding_table_index,
2820 0, /* msg_control: ignored */
2821 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2822 target_cache,
2823 1, /* msg_length */
2824 true, /* header_present */
2825 0, /* last_render_target: ignored */
2826 send_commit_msg, /* response_length */
2827 0, /* end_of_thread */
2828 send_commit_msg); /* send_commit_msg */
2829 }
2830
2831 static unsigned
2832 brw_surface_payload_size(struct brw_codegen *p,
2833 unsigned num_channels,
2834 bool has_simd4x2,
2835 bool has_simd16)
2836 {
2837 if (has_simd4x2 &&
2838 brw_inst_access_mode(p->devinfo, p->current) == BRW_ALIGN_16)
2839 return 1;
2840 else if (has_simd16 &&
2841 brw_inst_exec_size(p->devinfo, p->current) == BRW_EXECUTE_16)
2842 return 2 * num_channels;
2843 else
2844 return num_channels;
2845 }
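/* A worked example (illustrative): an Align1 SIMD16 untyped read of four
 * channels has a payload/response size of 2 * 4 = 8 registers, while the
 * same read in SIMD8 needs only 4; a SIMD4x2 (Align16) access with
 * has_simd4x2 set always fits in 1.
 */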
2846
2847 static void
2848 brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
2849 brw_inst *insn,
2850 unsigned atomic_op,
2851 bool response_expected)
2852 {
2853 const struct gen_device_info *devinfo = p->devinfo;
2854 unsigned msg_control =
2855 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2856 (response_expected ? 1 << 5 : 0); /* Return data expected */
2857
2858 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2859 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2860 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2861 msg_control |= 1 << 4; /* SIMD8 mode */
2862
2863 brw_inst_set_dp_msg_type(devinfo, insn,
2864 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2865 } else {
2866 brw_inst_set_dp_msg_type(devinfo, insn,
2867 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2868 }
2869 } else {
2870 brw_inst_set_dp_msg_type(devinfo, insn,
2871 GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2872
2873 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2874 msg_control |= 1 << 4; /* SIMD8 mode */
2875 }
2876
2877 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2878 }
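/* A worked example of the msg_control encoding above (illustrative,
 * assuming BRW_AOP_ADD from brw_eu_defines.h): a SIMD8 Align1 atomic add
 * that wants the old value back encodes
 *
 *    msg_control = BRW_AOP_ADD | (1 << 5) | (1 << 4);
 *
 * i.e. the operation type in the low bits, "return data expected" in
 * bit 5, and SIMD8 mode in bit 4.
 */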
2879
2880 void
2881 brw_untyped_atomic(struct brw_codegen *p,
2882 struct brw_reg dst,
2883 struct brw_reg payload,
2884 struct brw_reg surface,
2885 unsigned atomic_op,
2886 unsigned msg_length,
2887 bool response_expected,
2888 bool header_present)
2889 {
2890 const struct gen_device_info *devinfo = p->devinfo;
2891 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2892 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2893 GEN7_SFID_DATAPORT_DATA_CACHE);
2894 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
2895 /* Mask out unused components -- This is especially important in Align16
2896 * mode on generations that don't have native support for SIMD4x2 atomics,
2897 * because unused but enabled components will cause the dataport to perform
2898 * additional atomic operations on the addresses that happen to be in the
2899 * uninitialized Y, Z and W coordinates of the payload.
2900 */
2901 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2902 struct brw_inst *insn = brw_send_indirect_surface_message(
2903 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
2904 brw_surface_payload_size(p, response_expected,
2905 devinfo->gen >= 8 || devinfo->is_haswell, true),
2906 header_present);
2907
2908 brw_set_dp_untyped_atomic_message(
2909 p, insn, atomic_op, response_expected);
2910 }
2911
2912 static void
2913 brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
2914 struct brw_inst *insn,
2915 unsigned num_channels)
2916 {
2917 const struct gen_device_info *devinfo = p->devinfo;
2918 /* Set mask of 32-bit channels to drop. */
2919 unsigned msg_control = 0xf & (0xf << num_channels);
2920
2921 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2922 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
2923 msg_control |= 1 << 4; /* SIMD16 mode */
2924 else
2925 msg_control |= 2 << 4; /* SIMD8 mode */
2926 }
2927
2928 brw_inst_set_dp_msg_type(devinfo, insn,
2929 (devinfo->gen >= 8 || devinfo->is_haswell ?
2930 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2931 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
2932 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2933 }
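/* A worked example of the channel mask above (illustrative): for
 * num_channels == 2, msg_control starts as 0xf & (0xf << 2) == 0xc,
 * i.e. Z and W are dropped and only X and Y are returned.
 */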
2934
2935 void
2936 brw_untyped_surface_read(struct brw_codegen *p,
2937 struct brw_reg dst,
2938 struct brw_reg payload,
2939 struct brw_reg surface,
2940 unsigned msg_length,
2941 unsigned num_channels)
2942 {
2943 const struct gen_device_info *devinfo = p->devinfo;
2944 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2945 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2946 GEN7_SFID_DATAPORT_DATA_CACHE);
2947 struct brw_inst *insn = brw_send_indirect_surface_message(
2948 p, sfid, dst, payload, surface, msg_length,
2949 brw_surface_payload_size(p, num_channels, true, true),
2950 false);
2951
2952 brw_set_dp_untyped_surface_read_message(
2953 p, insn, num_channels);
2954 }
2955
2956 static void
2957 brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
2958 struct brw_inst *insn,
2959 unsigned num_channels)
2960 {
2961 const struct gen_device_info *devinfo = p->devinfo;
2962 /* Set mask of 32-bit channels to drop. */
2963 unsigned msg_control = 0xf & (0xf << num_channels);
2964
2965 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2966 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
2967 msg_control |= 1 << 4; /* SIMD16 mode */
2968 else
2969 msg_control |= 2 << 4; /* SIMD8 mode */
2970 } else {
2971 if (devinfo->gen >= 8 || devinfo->is_haswell)
2972 msg_control |= 0 << 4; /* SIMD4x2 mode */
2973 else
2974 msg_control |= 2 << 4; /* SIMD8 mode */
2975 }
2976
2977 brw_inst_set_dp_msg_type(devinfo, insn,
2978 devinfo->gen >= 8 || devinfo->is_haswell ?
2979 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
2980 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
2981 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2982 }
2983
2984 void
2985 brw_untyped_surface_write(struct brw_codegen *p,
2986 struct brw_reg payload,
2987 struct brw_reg surface,
2988 unsigned msg_length,
2989 unsigned num_channels,
2990 bool header_present)
2991 {
2992 const struct gen_device_info *devinfo = p->devinfo;
2993 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2994 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2995 GEN7_SFID_DATAPORT_DATA_CACHE);
2996 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
2997 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2998 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
2999 WRITEMASK_X : WRITEMASK_XYZW;
3000 struct brw_inst *insn = brw_send_indirect_surface_message(
3001 p, sfid, brw_writemask(brw_null_reg(), mask),
3002 payload, surface, msg_length, 0, header_present);
3003
3004 brw_set_dp_untyped_surface_write_message(
3005 p, insn, num_channels);
3006 }
3007
3008 static unsigned
3009 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
3010 {
3011 switch (bit_size) {
3012 case 8:
3013 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
3014 case 16:
3015 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
3016 case 32:
3017 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
3018 default:
3019 unreachable("Unsupported bit_size for byte scattered messages");
3020 }
3021 }
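/* A worked example (illustrative): a 16-bit byte-scattered access maps to
 * GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD, which the callers below shift
 * into place with "<< 2" and then OR with 1 for SIMD16 or 0 for SIMD8.
 */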
3022
3023
3024 void
3025 brw_byte_scattered_read(struct brw_codegen *p,
3026 struct brw_reg dst,
3027 struct brw_reg payload,
3028 struct brw_reg surface,
3029 unsigned msg_length,
3030 unsigned bit_size)
3031 {
3032 const struct gen_device_info *devinfo = p->devinfo;
3033 assert(devinfo->gen > 7 || devinfo->is_haswell);
3034 assert(brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3035 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
3036
3037 struct brw_inst *insn = brw_send_indirect_surface_message(
3038 p, sfid, dst, payload, surface, msg_length,
3039 brw_surface_payload_size(p, 1, true, true),
3040 false);
3041
3042 unsigned msg_control =
3043 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3044
3045 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
3046 msg_control |= 1; /* SIMD16 mode */
3047 else
3048 msg_control |= 0; /* SIMD8 mode */
3049
3050 brw_inst_set_dp_msg_type(devinfo, insn,
3051 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
3052 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3053 }
3054
3055 void
3056 brw_byte_scattered_write(struct brw_codegen *p,
3057 struct brw_reg payload,
3058 struct brw_reg surface,
3059 unsigned msg_length,
3060 unsigned bit_size,
3061 bool header_present)
3062 {
3063 const struct gen_device_info *devinfo = p->devinfo;
3064 assert(devinfo->gen > 7 || devinfo->is_haswell);
3065 assert(brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3066 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
3067
3068 struct brw_inst *insn = brw_send_indirect_surface_message(
3069 p, sfid, brw_writemask(brw_null_reg(), WRITEMASK_XYZW),
3070 payload, surface, msg_length, 0, header_present);
3071
3072 unsigned msg_control =
3073 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3074
3075 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
3076 msg_control |= 1; /* SIMD16 mode */
3077 else
3078 msg_control |= 0; /* SIMD8 mode */
3079
3080 brw_inst_set_dp_msg_type(devinfo, insn,
3081 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
3082 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3083 }
3084
3085 static void
3086 brw_set_dp_typed_atomic_message(struct brw_codegen *p,
3087 struct brw_inst *insn,
3088 unsigned atomic_op,
3089 bool response_expected)
3090 {
3091 const struct gen_device_info *devinfo = p->devinfo;
3092 unsigned msg_control =
3093 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3094 (response_expected ? 1 << 5 : 0); /* Return data expected */
3095
3096 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3097 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3098 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3099 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3100
3101 brw_inst_set_dp_msg_type(devinfo, insn,
3102 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
3103 } else {
3104 brw_inst_set_dp_msg_type(devinfo, insn,
3105 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
3106 }
3107
3108 } else {
3109 brw_inst_set_dp_msg_type(devinfo, insn,
3110 GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);
3111
3112 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3113 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3114 }
3115
3116 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3117 }
3118
3119 void
3120 brw_typed_atomic(struct brw_codegen *p,
3121 struct brw_reg dst,
3122 struct brw_reg payload,
3123 struct brw_reg surface,
3124 unsigned atomic_op,
3125 unsigned msg_length,
3126 bool response_expected,
3127 bool header_present) {
3128 const struct gen_device_info *devinfo = p->devinfo;
3129 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3130 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3131 GEN6_SFID_DATAPORT_RENDER_CACHE);
3132 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3133 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3134 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3135 struct brw_inst *insn = brw_send_indirect_surface_message(
3136 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
3137 brw_surface_payload_size(p, response_expected,
3138 devinfo->gen >= 8 || devinfo->is_haswell, false),
3139 header_present);
3140
3141 brw_set_dp_typed_atomic_message(
3142 p, insn, atomic_op, response_expected);
3143 }
3144
3145 static void
3146 brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
3147 struct brw_inst *insn,
3148 unsigned num_channels)
3149 {
3150 const struct gen_device_info *devinfo = p->devinfo;
3151 /* Set mask of unused channels. */
3152 unsigned msg_control = 0xf & (0xf << num_channels);
3153
3154 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3155 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3156 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3157 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3158 else
3159 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3160 }
3161
3162 brw_inst_set_dp_msg_type(devinfo, insn,
3163 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
3164 } else {
3165 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3166 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3167 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3168 }
3169
3170 brw_inst_set_dp_msg_type(devinfo, insn,
3171 GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
3172 }
3173
3174 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3175 }
3176
3177 void
3178 brw_typed_surface_read(struct brw_codegen *p,
3179 struct brw_reg dst,
3180 struct brw_reg payload,
3181 struct brw_reg surface,
3182 unsigned msg_length,
3183 unsigned num_channels,
3184 bool header_present)
3185 {
3186 const struct gen_device_info *devinfo = p->devinfo;
3187 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3188 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3189 GEN6_SFID_DATAPORT_RENDER_CACHE);
3190 struct brw_inst *insn = brw_send_indirect_surface_message(
3191 p, sfid, dst, payload, surface, msg_length,
3192 brw_surface_payload_size(p, num_channels,
3193 devinfo->gen >= 8 || devinfo->is_haswell, false),
3194 header_present);
3195
3196 brw_set_dp_typed_surface_read_message(
3197 p, insn, num_channels);
3198 }
3199
3200 static void
3201 brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
3202 struct brw_inst *insn,
3203 unsigned num_channels)
3204 {
3205 const struct gen_device_info *devinfo = p->devinfo;
3206 /* Set mask of unused channels. */
3207 unsigned msg_control = 0xf & (0xf << num_channels);
3208
3209 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3210 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3211 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3212 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3213 else
3214 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3215 }
3216
3217 brw_inst_set_dp_msg_type(devinfo, insn,
3218 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);
3219
3220 } else {
3221 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3222 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3223 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3224 }
3225
3226 brw_inst_set_dp_msg_type(devinfo, insn,
3227 GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
3228 }
3229
3230 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3231 }
3232
3233 void
3234 brw_typed_surface_write(struct brw_codegen *p,
3235 struct brw_reg payload,
3236 struct brw_reg surface,
3237 unsigned msg_length,
3238 unsigned num_channels,
3239 bool header_present)
3240 {
3241 const struct gen_device_info *devinfo = p->devinfo;
3242 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3243 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3244 GEN6_SFID_DATAPORT_RENDER_CACHE);
3245 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3246 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3247 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3248 WRITEMASK_X : WRITEMASK_XYZW);
3249 struct brw_inst *insn = brw_send_indirect_surface_message(
3250 p, sfid, brw_writemask(brw_null_reg(), mask),
3251 payload, surface, msg_length, 0, header_present);
3252
3253 brw_set_dp_typed_surface_write_message(
3254 p, insn, num_channels);
3255 }
3256
3257 static void
3258 brw_set_memory_fence_message(struct brw_codegen *p,
3259 struct brw_inst *insn,
3260 enum brw_message_target sfid,
3261 bool commit_enable)
3262 {
3263 const struct gen_device_info *devinfo = p->devinfo;
3264
3265 brw_set_message_descriptor(p, insn, sfid,
3266 1 /* message length */,
3267 (commit_enable ? 1 : 0) /* response length */,
3268 true /* header present */,
3269 false);
3270
3271 switch (sfid) {
3272 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3273 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3274 break;
3275 case GEN7_SFID_DATAPORT_DATA_CACHE:
3276 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3277 break;
3278 default:
3279 unreachable("Not reached");
3280 }
3281
3282 if (commit_enable)
3283 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3284 }
3285
3286 void
3287 brw_memory_fence(struct brw_codegen *p,
3288 struct brw_reg dst)
3289 {
3290 const struct gen_device_info *devinfo = p->devinfo;
3291 const bool commit_enable =
3292 devinfo->gen >= 10 || /* HSD ES # 1404612949 */
3293 (devinfo->gen == 7 && !devinfo->is_haswell);
3294 struct brw_inst *insn;
3295
3296 brw_push_insn_state(p);
3297 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3298 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3299 dst = vec1(dst);
3300
3301 /* Set dst as the destination for dependency tracking; the MEMORY_FENCE
3302 * message doesn't write anything back.
3303 */
   insn = next_insn(p, BRW_OPCODE_SEND);
   dst = retype(dst, BRW_REGISTER_TYPE_UW);
   brw_set_dest(p, insn, dst);
   brw_set_src0(p, insn, dst);
   brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
                                commit_enable);

   if (devinfo->gen == 7 && !devinfo->is_haswell) {
      /* IVB does typed surface access through the render cache, so we need
       * to flush it too.  Use a different register so both flushes can be
       * pipelined by the hardware.
       */
      insn = next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, insn, offset(dst, 1));
      brw_set_src0(p, insn, offset(dst, 1));
      brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
                                   commit_enable);

      /* Now write the response of the second message into the response of
       * the first to trigger a pipeline stall; this way future render and
       * data cache messages will be properly ordered with respect to past
       * data and render cache messages.
       */
      brw_MOV(p, dst, offset(dst, 1));
   }

   brw_pop_insn_state(p);
}

void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;
   const uint16_t exec_size = brw_inst_exec_size(devinfo, p->current);

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually immediate.
    */
   insn = brw_send_indirect_message(p,
                                    GEN7_SFID_PIXEL_INTERPOLATOR,
                                    dest,
                                    mrf,
                                    vec1(data));
   brw_inst_set_mlen(devinfo, insn, msg_length);
   brw_inst_set_rlen(devinfo, insn, response_length);

   brw_inst_set_pi_simd_mode(devinfo, insn, exec_size == BRW_EXECUTE_16);
   brw_inst_set_pi_slot_group(devinfo, insn, 0); /* zero unless 32/64px dispatch */
   brw_inst_set_pi_nopersp(devinfo, insn, noperspective);
   brw_inst_set_pi_message_type(devinfo, insn, mode);
}
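
/* The mode parameter above selects the interpolation location and is
 * expected to be one of the GEN7_PIXEL_INTERPOLATOR_LOC_* message types
 * (e.g. CENTROID or SAMPLE) defined in brw_eu_defines.h.
 */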

void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
   const unsigned qtr_control = brw_inst_qtr_control(devinfo, p->current);
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just
          * find the first bit set in the execution mask.  The register
          * exists on HSW already but it reads back as all ones when the
          * current instruction has execution masking disabled, so it's
          * kind of useless.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's
             * not tightly packed (i.e. it doesn't have the form '2^n - 1'
             * for some n).  Combine ce0 with the given dispatch (or
             * vector) mask to mask off those channels which were never
             * dispatched by the hardware.
             */
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
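            /* e.g. qtr_control == 2 (third quarter) shifts the dispatch
             * mask right by 16 so that its bit 0 lines up with channel 16.
             */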
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }

         /* Quarter control has the effect of magically shifting the value
          * of ce0 so you'll get the first active channel relative to the
          * specified quarter control as result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
      } else {
         const struct brw_reg flag = brw_flag_reg(
            brw_inst_flag_reg_nr(devinfo, p->current),
            brw_inst_flag_subreg_nr(devinfo, p->current));

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking
          * and a conditional modifier enabled in order to get the full
          * execution mask in f1.0.  We could use a single 32-wide move
          * here if it weren't for the hardware bug that causes channel
          * enables to be applied incorrectly to the second half of 32-wide
          * instructions on Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
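         /* Each iteration covers lower_size channels.  cvt() maps a width
          * of 1/2/4/8/16/32 to 1..6, so cvt(lower_size) - 1 below is the
          * BRW_EXECUTE_* encoding for that width.
          */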
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }

         /* Find the first bit set in the exec_size-wide portion of the
          * flag register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.  Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination first without and then with execution
          * masking, in order to find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
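         /* dst.x now holds 0 if the first channel was enabled (the masked
          * MOV overwrote the unmasked one) and 1 otherwise, which is the
          * index of the first live channel.
          */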
      }
   }

   brw_pop_insn_state(p);
}

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset.  The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address.  Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));
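         /* e.g. a 4-byte type with an hstride encoding of 1 (unit stride)
          * yields a shift of 2, i.e. a byte offset of idx * 4.
          */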

         /* We can only address up to limit bytes using the indirect
          * addressing immediate; account for the difference if the source
          * register is above this limit.
          */
         if (offset >= limit) {
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation
             *    is integer DWord multiply, indirect addressing must not
             *    be used."
             *
             * To work around this restriction, we do two integer MOVs
             * instead of one 64-bit MOV.  Because no double value should
             * ever cross a register boundary, it's safe to use the
             * immediate offset in the indirect here to handle adding 4
             * bytes to the offset and avoid the extra ADD to the register
             * file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_vec1_indirect(addr.subnr, offset),
                           BRW_REGISTER_TYPE_D));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_vec1_indirect(addr.subnr, offset + 4),
                           BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can be either zero or one; replicate
          * it to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we just want to
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const unsigned sfid = (p->devinfo->gen >= 8 || p->devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(p->devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_src1(p, send, brw_imm_ud(0));
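   /* Message length 2 and response length 0: the payload carries the
    * offset and the value for the atomic ADD, whose result is discarded.
    */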
   brw_set_message_descriptor(p, send, sfid, 2, 0, false, false);
   brw_inst_set_binding_table_index(p->devinfo, send, surf_index);
   brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);

   brw_pop_insn_state(p);
}


/**
 * Emit the SEND message for a barrier
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());

   brw_set_message_descriptor(p, inst, BRW_SFID_MESSAGE_GATEWAY,
                              1 /* msg_length */,
                              0 /* response_length */,
                              false /* header_present */,
                              false /* end_of_thread */);

   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}
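
/* Sketch of the intended usage (the pairing is up to the caller): a
 * barrier is typically emitted as brw_barrier() followed by brw_WAIT(),
 * where the former sends the gateway message announcing arrival and the
 * latter blocks on the notification register until the gateway signals
 * that the whole thread group has reached the barrier.
 */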

/**
 * Change the floating point rounding mode by updating the rounding mode
 * field in the control register (cr0.0).  This supports switching between
 * RTNE (00), RU (01), RD (10) and RTZ (11) rounding using bitwise
 * operations.  Only RTNE and RTZ rounding are currently enabled in NIR.
 */
void
brw_rounding_mode(struct brw_codegen *p,
                  enum brw_rnd_mode mode)
{
   const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;
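   /* e.g. mode == BRW_RND_MODE_RTZ (11b) makes bits equal to
    * BRW_CR0_RND_MODE_MASK, so the AND below is skipped and the OR alone
    * sets both bits; RTNE (00b) gives bits == 0 and needs only the AND.
    */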

   if (bits != BRW_CR0_RND_MODE_MASK) {
      brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                               brw_imm_ud(~BRW_CR0_RND_MODE_MASK));

      /* From the Skylake PRM, Volume 7, page 760:
       *
       *    "Implementation Restriction on Register Access: When the control
       *    register is used as an explicit source and/or destination,
       *    hardware does not ensure execution pipeline coherency.  Software
       *    must set the thread control field to ‘switch’ for an instruction
       *    that uses control register as an explicit operand."
       */
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }

   if (bits) {
      brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                              brw_imm_ud(bits));
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }
}