/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
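/* Example usage (a sketch; the registers are hypothetical): before emitting
 * a SEND on Gen6+ whose payload currently lives in a GRF, resolve the
 * implied move so the payload is in message register space:
 *
 *    struct brw_reg payload = brw_vec8_grf(12, 0);
 *    gen6_resolve_implied_move(p, &payload, 1);
 *    // payload now refers to m1 and can be used as the SEND source.
 */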

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
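/* For instance, with GEN7_MRF_HACK_START at 112 (its usual value), a write
 * to the fictional m4 lands in g116, inside the R112-R127 range the PRM
 * reserves for EOT message sources.
 */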

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          *    Although Dst.HorzStride is a don't care for Align16, HW needs
          *    this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Even though it is ignored in da16 mode, HW still requires this
          * to be set to '01'.
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * In platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16.  In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}
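/* For example, with automatic_exec_sizes enabled on Gen6+, a scalar
 * destination (width 1, e.g. an address register write) shrinks the
 * instruction's exec_size to 1 automatically, since width < BRW_EXECUTE_4.
 */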

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be an immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Set the Message Descriptor and Extended Message Descriptor fields
 * for SEND messages.
 *
 * \note This zeroes out the Function Control bits, so it must be called
 *       \b before filling out any message-specific data.  Callers can
 *       choose not to fill in irrelevant bits; they will be zero.
 */
void
brw_set_message_descriptor(struct brw_codegen *p,
                           brw_inst *inst,
                           enum brw_message_target sfid,
                           unsigned msg_length,
                           unsigned response_length,
                           bool header_present,
                           bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_src1(p, inst, brw_imm_d(0));

   /* For indirect sends, `inst` will not be the SEND/SENDC instruction
    * itself; instead, it will be a MOV/OR into the address register.
    *
    * In this case, we avoid setting the extended message descriptor bits,
    * since they go on the later SEND/SENDC instead and if set here would
    * instead clobber the conditionalmod bits.
    */
   unsigned opcode = brw_inst_opcode(devinfo, inst);
   if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC) {
      brw_inst_set_sfid(devinfo, inst, sfid);
   }

   brw_inst_set_mlen(devinfo, inst, msg_length);
   brw_inst_set_rlen(devinfo, inst, response_length);
   brw_inst_set_eot(devinfo, inst, end_of_thread);

   if (devinfo->gen >= 5) {
      brw_inst_set_header_present(devinfo, inst, header_present);
   }
}
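/* A sketch of typical usage: set the shared descriptor fields first, then
 * fill in the message-specific function control bits (the `send`
 * instruction and the field values here are purely illustrative):
 *
 *    brw_set_message_descriptor(p, send, BRW_SFID_SAMPLER,
 *                               1, 4, true, false);
 *    // ...now program the sampler-specific descriptor bits.
 */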

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_message_descriptor(p, inst, BRW_SFID_MATH,
                              msg_length, response_length, false, false);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}
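/* Consequently, a two-operand function like POW gets mlen 2 / rlen 1,
 * INT_DIV_QUOTIENT_AND_REMAINDER gets mlen 2 / rlen 2 (quotient plus
 * remainder), and a one-operand function like SIN gets mlen 1 / rlen 1.
 */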

static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              1, response_length, true, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              msg_length, response_length, true,
                              flags & BRW_URB_WRITE_EOT);

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

void
brw_set_dp_write_message(struct brw_codegen *p,
                         brw_inst *insn,
                         unsigned binding_table_index,
                         unsigned msg_control,
                         unsigned msg_type,
                         unsigned target_cache,
                         unsigned msg_length,
                         bool header_present,
                         unsigned last_render_target,
                         unsigned response_length,
                         unsigned end_of_thread,
                         unsigned send_commit_msg)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_WRITE);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, end_of_thread);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_write_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_write_msg_control(devinfo, insn, msg_control);
   brw_inst_set_rt_last(devinfo, insn, last_render_target);
   if (devinfo->gen < 7) {
      brw_inst_set_dp_write_commit(devinfo, insn, send_commit_msg);
   }
}

void
brw_set_dp_read_message(struct brw_codegen *p,
                        brw_inst *insn,
                        unsigned binding_table_index,
                        unsigned msg_control,
                        unsigned msg_type,
                        unsigned target_cache,
                        unsigned msg_length,
                        bool header_present,
                        unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_READ);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, false);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_read_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_read_msg_control(devinfo, insn, msg_control);
   if (devinfo->gen < 6)
      brw_inst_set_dp_read_target_cache(devinfo, insn, target_cache);
}

void
brw_set_sampler_message(struct brw_codegen *p,
                        brw_inst *inst,
                        unsigned binding_table_index,
                        unsigned sampler,
                        unsigned msg_type,
                        unsigned response_length,
                        unsigned msg_length,
                        unsigned header_present,
                        unsigned simd_mode,
                        unsigned return_format)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER, msg_length,
                              response_length, header_present, false);

   brw_inst_set_binding_table_index(devinfo, inst, binding_table_index);
   brw_inst_set_sampler(devinfo, inst, sampler);
   brw_inst_set_sampler_msg_type(devinfo, inst, msg_type);
   if (devinfo->gen >= 5) {
      brw_inst_set_sampler_simd_mode(devinfo, inst, simd_mode);
   } else if (devinfo->gen == 4 && !devinfo->is_g4x) {
      brw_inst_set_sampler_return_format(devinfo, inst, return_format);
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_message_descriptor(p, inst, GEN7_SFID_DATAPORT_DATA_CACHE,
                              mlen, rlen, header_present, false);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
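/* Note the two block-size encodings above: num_regs = 4 becomes
 * _mesa_logbase2(4) = 2 on Gen8+, but 4 - 1 = 3 (0b11) on Gen7.
 */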

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];
   memcpy(insn, p->current, sizeof(*insn));

   brw_inst_set_opcode(devinfo, insn, opcode);
   return insn;
}
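/* Because the store is grown with reralloc (doubling), a call to
 * brw_next_insn() can move p->store and invalidate any brw_inst pointer
 * taken earlier; that is why the if/loop stacks below record indices
 * rather than pointers (see push_if_stack and brw_ENDIF).
 */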

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
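/* E.g. a source at byte subnr 8 is encoded as component 2 for 3-src use. */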

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}

static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.nr < 128);
   assert(src1.nr < 128);
   assert(src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD);
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types.  The MAD and LRP emitters ensure
          * that all four types are float.  The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                          \
brw_inst *brw_##OP(struct brw_codegen *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0)                   \
{                                                         \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);       \
}

#define ALU2(OP)                                               \
brw_inst *brw_##OP(struct brw_codegen *p,                      \
                   struct brw_reg dest,                        \
                   struct brw_reg src0,                        \
                   struct brw_reg src1)                        \
{                                                              \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);      \
}

#define ALU3(OP)                                                     \
brw_inst *brw_##OP(struct brw_codegen *p,                            \
                   struct brw_reg dest,                              \
                   struct brw_reg src0,                              \
                   struct brw_reg src1,                              \
                   struct brw_reg src2)                              \
{                                                                    \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);      \
}

#define ALU3F(OP)                                                    \
brw_inst *brw_##OP(struct brw_codegen *p,                            \
                   struct brw_reg dest,                              \
                   struct brw_reg src0,                              \
                   struct brw_reg src1,                              \
                   struct brw_reg src2)                              \
{                                                                    \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                        \
          dest.type == BRW_REGISTER_TYPE_DF);                        \
   if (dest.type == BRW_REGISTER_TYPE_F) {                           \
      assert(src0.type == BRW_REGISTER_TYPE_F);                      \
      assert(src1.type == BRW_REGISTER_TYPE_F);                      \
      assert(src2.type == BRW_REGISTER_TYPE_F);                      \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {                   \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                     \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                     \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                     \
   }                                                                 \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);      \
}
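/* For reference, ALU2(SEL) expands to:
 *
 *    brw_inst *brw_SEL(struct brw_codegen *p, struct brw_reg dest,
 *                      struct brw_reg src0, struct brw_reg src1)
 *    { return brw_alu2(p, BRW_OPCODE_SEL, dest, src0, src1); }
 */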

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                  \
void brw_##OP(struct brw_codegen *p,                               \
              struct brw_reg dest,                                 \
              struct brw_reg src)                                  \
{                                                                  \
   const struct gen_device_info *devinfo = p->devinfo;             \
   brw_inst *rnd, *add;                                            \
   rnd = next_insn(p, BRW_OPCODE_##OP);                            \
   brw_set_dest(p, rnd, dest);                                     \
   brw_set_src0(p, rnd, src);                                      \
                                                                   \
   if (devinfo->gen < 6) {                                         \
      /* turn on round-increments */                               \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R); \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));               \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
   }                                                               \
}
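/* So on Gen4/5, brw_RNDZ(p, dst, src) emits roughly (assembly sketch):
 *
 *    rndz.r (8) dst src             // round; flag per-channel increment
 *    (+f0) add (8) dst dst 1.0F     // fix up the flagged channels
 */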

ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3F(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}
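/* With a UD destination on Gen8+ in Align1, the zero-fill path above emits
 * roughly (sketch; the g-numbers are hypothetical):
 *
 *    mov (8) g10<2>:hf  g11<8;8,1>:f  { NoDDClr }  // halves into even words
 *    mov (8) g10.1<2>:w 0x0:w         { NoDDChk }  // clear the odd words
 */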

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *    "Because this instruction does not have a 16-bit floating-point
       *     type, the source data type must be Word (W). The destination type
       *     must be F (Float)."
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}


/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}
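/* JMPI is encoded like an ADD onto the IP register (dst = ip, src0 = ip,
 * src1 = index), so `index` acts as a relative jump distance; with a
 * predicate it becomes a conditional scalar branch.
 */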

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, e.g. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn,
                          brw_inst_exec_size(devinfo, p->current));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}
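/* The * 16 factors above are byte distances: each Gen instruction is 128
 * bits (16 bytes), so (else_inst - if_inst + 1) * 16 advances IP to the
 * instruction just past the ELSE.
 */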

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);
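   /* br converts an instruction-count delta into the units the jump fields
    * use; roughly, jump targets are counted in whole instructions on Gen4,
    * in 64-bit compaction chunks (2 per instruction) on Gen5-7, and in
    * bytes (16 per instruction) on Gen8+ (see brw_jump_scale).
    */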
1420
1421 assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
1422 brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));
1423
1424 if (else_inst == NULL) {
1425 /* Patch IF -> ENDIF */
1426 if (devinfo->gen < 6) {
1427 /* Turn it into an IFF, which means no mask stack operations for
1428 * all-false and jumping past the ENDIF.
1429 */
1430 brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
1431 brw_inst_set_gen4_jump_count(devinfo, if_inst,
1432 br * (endif_inst - if_inst + 1));
1433 brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
1434 } else if (devinfo->gen == 6) {
1435 /* As of gen6, there is no IFF and IF must point to the ENDIF. */
1436 brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
1437 } else {
1438 brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
1439 brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
1440 }
1441 } else {
1442 brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));
1443
1444 /* Patch IF -> ELSE */
1445 if (devinfo->gen < 6) {
1446 brw_inst_set_gen4_jump_count(devinfo, if_inst,
1447 br * (else_inst - if_inst));
1448 brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
1449 } else if (devinfo->gen == 6) {
1450 brw_inst_set_gen6_jump_count(devinfo, if_inst,
1451 br * (else_inst - if_inst + 1));
1452 }
1453
1454 /* Patch ELSE -> ENDIF */
1455 if (devinfo->gen < 6) {
1456 /* BRW_OPCODE_ELSE pre-gen6 should point just past the
1457 * matching ENDIF.
1458 */
1459 brw_inst_set_gen4_jump_count(devinfo, else_inst,
1460 br * (endif_inst - else_inst + 1));
1461 brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
1462 } else if (devinfo->gen == 6) {
1463 /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
1464 brw_inst_set_gen6_jump_count(devinfo, else_inst,
1465 br * (endif_inst - else_inst));
1466 } else {
1467 /* The IF instruction's JIP should point just past the ELSE */
1468 brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
1469 /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
1470 brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
1471 brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
1472 if (devinfo->gen >= 8) {
1473 /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
1474 * should point to ENDIF.
1475 */
1476 brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
1477 }
1478 }
1479 }
1480 }
1481
1482 void
1483 brw_ELSE(struct brw_codegen *p)
1484 {
1485 const struct gen_device_info *devinfo = p->devinfo;
1486 brw_inst *insn;
1487
1488 insn = next_insn(p, BRW_OPCODE_ELSE);
1489
1490 if (devinfo->gen < 6) {
1491 brw_set_dest(p, insn, brw_ip_reg());
1492 brw_set_src0(p, insn, brw_ip_reg());
1493 brw_set_src1(p, insn, brw_imm_d(0x0));
1494 } else if (devinfo->gen == 6) {
1495 brw_set_dest(p, insn, brw_imm_w(0));
1496 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1497 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1498 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1499 } else if (devinfo->gen == 7) {
1500 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1501 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1502 brw_set_src1(p, insn, brw_imm_w(0));
1503 brw_inst_set_jip(devinfo, insn, 0);
1504 brw_inst_set_uip(devinfo, insn, 0);
1505 } else {
1506 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1507 brw_set_src0(p, insn, brw_imm_d(0));
1508 brw_inst_set_jip(devinfo, insn, 0);
1509 brw_inst_set_uip(devinfo, insn, 0);
1510 }
1511
1512 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1513 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1514 if (!p->single_program_flow && devinfo->gen < 6)
1515 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1516
1517 push_if_stack(p, insn);
1518 }
1519
1520 void
1521 brw_ENDIF(struct brw_codegen *p)
1522 {
1523 const struct gen_device_info *devinfo = p->devinfo;
1524 brw_inst *insn = NULL;
1525 brw_inst *else_inst = NULL;
1526 brw_inst *if_inst = NULL;
1527 brw_inst *tmp;
1528 bool emit_endif = true;
1529
1530 /* In single program flow mode, we can express IF and ELSE instructions
1531 * equivalently as ADD instructions that operate on IP. On platforms prior
1532 * to Gen6, flow control instructions cause an implied thread switch, so
1533 * this is a significant savings.
1534 *
1535 * However, on Gen6, writing to IP doesn't work in single program flow mode
1536 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1537 * not be updated by non-flow control instructions."). And on later
1538 * platforms, there is no significant benefit to converting control flow
1539 * instructions to conditional ADDs. So we only do this trick on Gen4 and
1540 * Gen5.
1541 */
1542 if (devinfo->gen < 6 && p->single_program_flow)
1543 emit_endif = false;
1544
1545 /*
1546 * A single next_insn() may change the base address of instruction store
1547 * memory(p->store), so call it first before referencing the instruction
1548 * store pointer from an index
1549 */
1550 if (emit_endif)
1551 insn = next_insn(p, BRW_OPCODE_ENDIF);
1552
1553 /* Pop the IF and (optional) ELSE instructions from the stack */
1554 p->if_depth_in_loop[p->loop_stack_depth]--;
1555 tmp = pop_if_stack(p);
1556 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1557 else_inst = tmp;
1558 tmp = pop_if_stack(p);
1559 }
1560 if_inst = tmp;
1561
1562 if (!emit_endif) {
1563 /* ENDIF is useless; don't bother emitting it. */
1564 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1565 return;
1566 }
1567
1568 if (devinfo->gen < 6) {
1569 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1570 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1571 brw_set_src1(p, insn, brw_imm_d(0x0));
1572 } else if (devinfo->gen == 6) {
1573 brw_set_dest(p, insn, brw_imm_w(0));
1574 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1575 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1576 } else if (devinfo->gen == 7) {
1577 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1578 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1579 brw_set_src1(p, insn, brw_imm_w(0));
1580 } else {
1581 brw_set_src0(p, insn, brw_imm_d(0));
1582 }
1583
1584 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1585 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1586 if (devinfo->gen < 6)
1587 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1588
1589 /* Also pop item off the stack in the endif instruction: */
1590 if (devinfo->gen < 6) {
1591 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1592 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1593 } else if (devinfo->gen == 6) {
1594 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1595 } else {
1596 brw_inst_set_jip(devinfo, insn, 2);
1597 }
1598 patch_IF_ELSE(p, if_inst, else_inst, insn);
1599 }
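/* A minimal sketch of how the structured control-flow helpers are chained
 * by a code generator. Illustrative only; it assumes p is a live
 * brw_codegen context and that a preceding CMP has set flag f0.0:
 *
 *    brw_inst *if_inst = brw_IF(p, BRW_EXECUTE_8);
 *    ...emit the "then" block...
 *    brw_ELSE(p);
 *    ...emit the "else" block...
 *    brw_ENDIF(p);    // pops the if-stack and patches the jump targets
 */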
1600
1601 brw_inst *
1602 brw_BREAK(struct brw_codegen *p)
1603 {
1604 const struct gen_device_info *devinfo = p->devinfo;
1605 brw_inst *insn;
1606
1607 insn = next_insn(p, BRW_OPCODE_BREAK);
1608 if (devinfo->gen >= 8) {
1609 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1610 brw_set_src0(p, insn, brw_imm_d(0x0));
1611 } else if (devinfo->gen >= 6) {
1612 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1613 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1614 brw_set_src1(p, insn, brw_imm_d(0x0));
1615 } else {
1616 brw_set_dest(p, insn, brw_ip_reg());
1617 brw_set_src0(p, insn, brw_ip_reg());
1618 brw_set_src1(p, insn, brw_imm_d(0x0));
1619 brw_inst_set_gen4_pop_count(devinfo, insn,
1620 p->if_depth_in_loop[p->loop_stack_depth]);
1621 }
1622 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1623 brw_inst_set_exec_size(devinfo, insn,
1624 brw_inst_exec_size(devinfo, p->current));
1625
1626 return insn;
1627 }
1628
1629 brw_inst *
1630 brw_CONT(struct brw_codegen *p)
1631 {
1632 const struct gen_device_info *devinfo = p->devinfo;
1633 brw_inst *insn;
1634
1635 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1636 brw_set_dest(p, insn, brw_ip_reg());
1637 if (devinfo->gen >= 8) {
1638 brw_set_src0(p, insn, brw_imm_d(0x0));
1639 } else {
1640 brw_set_src0(p, insn, brw_ip_reg());
1641 brw_set_src1(p, insn, brw_imm_d(0x0));
1642 }
1643
1644 if (devinfo->gen < 6) {
1645 brw_inst_set_gen4_pop_count(devinfo, insn,
1646 p->if_depth_in_loop[p->loop_stack_depth]);
1647 }
1648 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1649 brw_inst_set_exec_size(devinfo, insn,
1650 brw_inst_exec_size(devinfo, p->current));
1651 return insn;
1652 }
1653
1654 brw_inst *
1655 gen6_HALT(struct brw_codegen *p)
1656 {
1657 const struct gen_device_info *devinfo = p->devinfo;
1658 brw_inst *insn;
1659
1660 insn = next_insn(p, BRW_OPCODE_HALT);
1661 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1662 if (devinfo->gen >= 8) {
1663 brw_set_src0(p, insn, brw_imm_d(0x0));
1664 } else {
1665 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1666 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1667 }
1668
1669 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1670 brw_inst_set_exec_size(devinfo, insn,
1671 brw_inst_exec_size(devinfo, p->current));
1672 return insn;
1673 }
1674
1675 /* DO/WHILE loop:
1676 *
1677 * The DO/WHILE is just an unterminated loop -- break or continue are
1678 * used for control within the loop. There are a few ways this can be
1679 * done.
1680 *
1681 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1682 * jip and no DO instruction.
1683 *
1684 * For non-uniform control flow pre-gen6, there's a DO instruction to
1685 * push the mask, and a WHILE to jump back, and BREAK to get out and
1686 * pop the mask.
1687 *
1688 * For gen6, there's no more mask stack, so no need for DO. WHILE
1689 * just points back to the first instruction of the loop. (A usage
1690 * sketch follows brw_WHILE() below.) */
1691 brw_inst *
1692 brw_DO(struct brw_codegen *p, unsigned execute_size)
1693 {
1694 const struct gen_device_info *devinfo = p->devinfo;
1695
1696 if (devinfo->gen >= 6 || p->single_program_flow) {
1697 push_loop_stack(p, &p->store[p->nr_insn]);
1698 return &p->store[p->nr_insn];
1699 } else {
1700 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1701
1702 push_loop_stack(p, insn);
1703
1704 /* Override the defaults for this instruction:
1705 */
1706 brw_set_dest(p, insn, brw_null_reg());
1707 brw_set_src0(p, insn, brw_null_reg());
1708 brw_set_src1(p, insn, brw_null_reg());
1709
1710 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1711 brw_inst_set_exec_size(devinfo, insn, execute_size);
1712 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1713
1714 return insn;
1715 }
1716 }
1717
1718 /**
1719 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1720 * instruction here.
1721 *
1722 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1723 * nesting, since it can always just point to the end of the block/current loop.
1724 */
1725 static void
1726 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1727 {
1728 const struct gen_device_info *devinfo = p->devinfo;
1729 brw_inst *do_inst = get_inner_do_insn(p);
1730 brw_inst *inst;
1731 unsigned br = brw_jump_scale(devinfo);
1732
1733 assert(devinfo->gen < 6);
1734
1735 for (inst = while_inst - 1; inst != do_inst; inst--) {
1736 /* A non-zero jump count means this instruction has already been
1737 * patched, because it belongs to a loop nested inside the one we're
1738 * patching now.
1739 */
1740 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1741 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1742 brw_inst_set_gen4_jump_count(devinfo, inst, br * ((while_inst - inst) + 1));
1743 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1744 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1745 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1746 }
1747 }
1748 }
1749
1750 brw_inst *
1751 brw_WHILE(struct brw_codegen *p)
1752 {
1753 const struct gen_device_info *devinfo = p->devinfo;
1754 brw_inst *insn, *do_insn;
1755 unsigned br = brw_jump_scale(devinfo);
1756
1757 if (devinfo->gen >= 6) {
1758 insn = next_insn(p, BRW_OPCODE_WHILE);
1759 do_insn = get_inner_do_insn(p);
1760
1761 if (devinfo->gen >= 8) {
1762 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1763 brw_set_src0(p, insn, brw_imm_d(0));
1764 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1765 } else if (devinfo->gen == 7) {
1766 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1767 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1768 brw_set_src1(p, insn, brw_imm_w(0));
1769 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1770 } else {
1771 brw_set_dest(p, insn, brw_imm_w(0));
1772 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1773 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1774 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1775 }
1776
1777 brw_inst_set_exec_size(devinfo, insn,
1778 brw_inst_exec_size(devinfo, p->current));
1779
1780 } else {
1781 if (p->single_program_flow) {
1782 insn = next_insn(p, BRW_OPCODE_ADD);
1783 do_insn = get_inner_do_insn(p);
1784
1785 brw_set_dest(p, insn, brw_ip_reg());
1786 brw_set_src0(p, insn, brw_ip_reg());
1787 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1788 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1789 } else {
1790 insn = next_insn(p, BRW_OPCODE_WHILE);
1791 do_insn = get_inner_do_insn(p);
1792
1793 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1794
1795 brw_set_dest(p, insn, brw_ip_reg());
1796 brw_set_src0(p, insn, brw_ip_reg());
1797 brw_set_src1(p, insn, brw_imm_d(0));
1798
1799 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1800 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1801 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1802
1803 brw_patch_break_cont(p, insn);
1804 }
1805 }
1806 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1807
1808 p->loop_stack_depth--;
1809
1810 return insn;
1811 }
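/* The usage sketch promised above -- hypothetical, assuming devinfo is in
 * scope and the loop body sets up a flag condition for the BREAK:
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *    ...loop body...
 *    brw_inst *brk = brw_BREAK(p);
 *    brw_inst_set_pred_control(devinfo, brk, BRW_PREDICATE_NORMAL);
 *    ...more loop body...
 *    brw_WHILE(p);
 *    brw_set_uip_jip(p, start_offset);   // patches BREAK/CONT on gen6+
 */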
1812
1813 /* FORWARD JUMPS:
1814 */
1815 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1816 {
1817 const struct gen_device_info *devinfo = p->devinfo;
1818 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1819 unsigned jmpi = 1;
1820
1821 if (devinfo->gen >= 5)
1822 jmpi = 2;
1823
1824 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1825 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1826
1827 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1828 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1829 }
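/* Matches how the pre-gen6 clip/SF generators use it (index arithmetic as
 * in the existing callers; the skipped block is up to the caller):
 *
 *    int jmp = brw_JMPI(p, brw_imm_d(0), BRW_PREDICATE_NORMAL) - p->store;
 *    ...instructions to be skipped when the predicate is true...
 *    brw_land_fwd_jump(p, jmp);   // lands the jump just past this point
 */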
1830
1831 /* To integrate with the above, it makes sense that the comparison
1832 * instruction should populate the flag register. It might be simpler
1833 * just to use the flag reg for most WM tasks?
1834 */
1835 void brw_CMP(struct brw_codegen *p,
1836 struct brw_reg dest,
1837 unsigned conditional,
1838 struct brw_reg src0,
1839 struct brw_reg src1)
1840 {
1841 const struct gen_device_info *devinfo = p->devinfo;
1842 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1843
1844 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1845 brw_set_dest(p, insn, dest);
1846 brw_set_src0(p, insn, src0);
1847 brw_set_src1(p, insn, src1);
1848
1849 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1850 * page says:
1851 * "Any CMP instruction with a null destination must use a {switch}."
1852 *
1853 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1854 * mentioned on their work-arounds pages.
1855 */
1856 if (devinfo->gen == 7) {
1857 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1858 dest.nr == BRW_ARF_NULL) {
1859 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1860 }
1861 }
1862 }
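/* For instance, a compare feeding a predicated MOV (register numbers are
 * placeholders):
 *
 *    brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE,
 *            brw_vec8_grf(2, 0), brw_imm_f(0.0f));
 *    brw_set_default_predicate_control(p, BRW_PREDICATE_NORMAL);
 *    brw_MOV(p, brw_vec8_grf(4, 0), brw_vec8_grf(3, 0));
 *    brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
 */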
1863
1864 /***********************************************************************
1865 * Helpers for the various SEND message types:
1866 */
1867
1868 /** Extended math function, float[8].
1869 */
1870 void gen4_math(struct brw_codegen *p,
1871 struct brw_reg dest,
1872 unsigned function,
1873 unsigned msg_reg_nr,
1874 struct brw_reg src,
1875 unsigned precision )
1876 {
1877 const struct gen_device_info *devinfo = p->devinfo;
1878 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1879 unsigned data_type;
1880 if (has_scalar_region(src)) {
1881 data_type = BRW_MATH_DATA_SCALAR;
1882 } else {
1883 data_type = BRW_MATH_DATA_VECTOR;
1884 }
1885
1886 assert(devinfo->gen < 6);
1887
1888 /* Example code doesn't set predicate_control for send
1889 * instructions.
1890 */
1891 brw_inst_set_pred_control(devinfo, insn, 0);
1892 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1893
1894 brw_set_dest(p, insn, dest);
1895 brw_set_src0(p, insn, src);
1896 brw_set_math_message(p,
1897 insn,
1898 function,
1899 src.type == BRW_REGISTER_TYPE_D,
1900 precision,
1901 data_type);
1902 }
1903
1904 void gen6_math(struct brw_codegen *p,
1905 struct brw_reg dest,
1906 unsigned function,
1907 struct brw_reg src0,
1908 struct brw_reg src1)
1909 {
1910 const struct gen_device_info *devinfo = p->devinfo;
1911 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1912
1913 assert(devinfo->gen >= 6);
1914
1915 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1916 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1917
1918 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1919 if (devinfo->gen == 6) {
1920 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1921 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1922 }
1923
1924 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1925 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1926 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1927 assert(src0.type != BRW_REGISTER_TYPE_F);
1928 assert(src1.type != BRW_REGISTER_TYPE_F);
1929 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1930 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1931 } else {
1932 assert(src0.type == BRW_REGISTER_TYPE_F);
1933 assert(src1.type == BRW_REGISTER_TYPE_F);
1934 }
1935
1936 /* Source modifiers are ignored for extended math instructions on Gen6. */
1937 if (devinfo->gen == 6) {
1938 assert(!src0.negate);
1939 assert(!src0.abs);
1940 assert(!src1.negate);
1941 assert(!src1.abs);
1942 }
1943
1944 brw_inst_set_math_function(devinfo, insn, function);
1945
1946 brw_set_dest(p, insn, dest);
1947 brw_set_src0(p, insn, src0);
1948 brw_set_src1(p, insn, src1);
1949 }
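/* Example: a SIMD8 reciprocal square root. RSQ is a single-source
 * function, so src1 is tied off with the null register (GRF numbers are
 * made up):
 *
 *    gen6_math(p, brw_vec8_grf(10, 0), BRW_MATH_FUNCTION_RSQ,
 *              brw_vec8_grf(2, 0), brw_null_reg());
 */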
1950
1951 /**
1952 * Return the right surface index to access the thread scratch space using
1953 * stateless dataport messages.
1954 */
1955 unsigned
1956 brw_scratch_surface_idx(const struct brw_codegen *p)
1957 {
1958 /* The scratch space is thread-local so IA coherency is unnecessary. */
1959 if (p->devinfo->gen >= 8)
1960 return GEN8_BTI_STATELESS_NON_COHERENT;
1961 else
1962 return BRW_BTI_STATELESS;
1963 }
1964
1965 /**
1966 * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1967 * using a constant offset per channel.
1968 *
1969 * The offset must be aligned to oword size (16 bytes). Used for
1970 * register spilling.
1971 */
1972 void brw_oword_block_write_scratch(struct brw_codegen *p,
1973 struct brw_reg mrf,
1974 int num_regs,
1975 unsigned offset)
1976 {
1977 const struct gen_device_info *devinfo = p->devinfo;
1978 const unsigned target_cache =
1979 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1980 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1981 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
1982 uint32_t msg_type;
1983
1984 if (devinfo->gen >= 6)
1985 offset /= 16;
1986
1987 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1988
1989 const unsigned mlen = 1 + num_regs;
1990
1991 /* Set up the message header. This is g0, with g0.2 filled with
1992 * the offset. We don't want to leave our offset around in g0 or
1993 * it'll screw up texture samples, so set it up inside the message
1994 * reg.
1995 */
1996 {
1997 brw_push_insn_state(p);
1998 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1999 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2000 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2001
2002 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2003
2004 /* set message header global offset field (reg 0, element 2) */
2005 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2006 brw_MOV(p,
2007 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2008 mrf.nr,
2009 2), BRW_REGISTER_TYPE_UD),
2010 brw_imm_ud(offset));
2011
2012 brw_pop_insn_state(p);
2013 }
2014
2015 {
2016 struct brw_reg dest;
2017 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2018 int send_commit_msg;
2019 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
2020 BRW_REGISTER_TYPE_UW);
2021
2022 brw_inst_set_compression(devinfo, insn, false);
2023
2024 if (brw_inst_exec_size(devinfo, insn) >= 16)
2025 src_header = vec16(src_header);
2026
2027 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2028 if (devinfo->gen < 6)
2029 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2030
2031 /* Until gen6, writes followed by reads from the same location
2032 * are not guaranteed to be ordered unless write_commit is set.
2033 * If set, then a no-op write is issued to the destination
2034 * register to set a dependency, and a read from the destination
2035 * can be used to ensure the ordering.
2036 *
2037 * For gen6, only writes between different threads need ordering
2038 * protection. Our use of DP writes is all about register
2039 * spilling within a thread.
2040 */
2041 if (devinfo->gen >= 6) {
2042 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2043 send_commit_msg = 0;
2044 } else {
2045 dest = src_header;
2046 send_commit_msg = 1;
2047 }
2048
2049 brw_set_dest(p, insn, dest);
2050 if (devinfo->gen >= 6) {
2051 brw_set_src0(p, insn, mrf);
2052 } else {
2053 brw_set_src0(p, insn, brw_null_reg());
2054 }
2055
2056 if (devinfo->gen >= 6)
2057 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2058 else
2059 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2060
2061 brw_set_dp_write_message(p,
2062 insn,
2063 brw_scratch_surface_idx(p),
2064 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2065 msg_type,
2066 target_cache,
2067 mlen,
2068 true, /* header_present */
2069 0, /* not a render target */
2070 send_commit_msg, /* response_length */
2071 0, /* eot */
2072 send_commit_msg);
2073 }
2074 }
2075
2076
2077 /**
2078 * Read a block of OWORDs (half a GRF each) from the scratch buffer
2079 * using a constant index per channel.
2080 *
2081 * Offset must be aligned to oword size (16 bytes). Used for register
2082 * spilling.
2083 */
2084 void
2085 brw_oword_block_read_scratch(struct brw_codegen *p,
2086 struct brw_reg dest,
2087 struct brw_reg mrf,
2088 int num_regs,
2089 unsigned offset)
2090 {
2091 const struct gen_device_info *devinfo = p->devinfo;
2092
2093 if (devinfo->gen >= 6)
2094 offset /= 16;
2095
2096 if (p->devinfo->gen >= 7) {
2097 /* On gen7 and above, we no longer have message registers and we can
2098 * send from any register we want. By using the destination register
2099 * for the message, we guarantee that the implied message write won't
2100 * accidentally overwrite anything. This has been a problem because
2101 * the MRF registers and source for the final FB write are both fixed
2102 * and may overlap.
2103 */
2104 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2105 } else {
2106 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2107 }
2108 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2109
2110 const unsigned rlen = num_regs;
2111 const unsigned target_cache =
2112 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2113 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2114 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2115
2116 {
2117 brw_push_insn_state(p);
2118 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2119 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2120 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2121
2122 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2123
2124 /* set message header global offset field (reg 0, element 2) */
2125 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2126 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2127
2128 brw_pop_insn_state(p);
2129 }
2130
2131 {
2132 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2133
2134 assert(brw_inst_pred_control(devinfo, insn) == 0);
2135 brw_inst_set_compression(devinfo, insn, false);
2136
2137 brw_set_dest(p, insn, dest); /* UW? */
2138 if (devinfo->gen >= 6) {
2139 brw_set_src0(p, insn, mrf);
2140 } else {
2141 brw_set_src0(p, insn, brw_null_reg());
2142 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2143 }
2144
2145 brw_set_dp_read_message(p,
2146 insn,
2147 brw_scratch_surface_idx(p),
2148 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2149 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
2150 target_cache,
2151 1, /* msg_length */
2152 true, /* header_present */
2153 rlen);
2154 }
2155 }
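/* Together with brw_oword_block_write_scratch() this forms a spill/fill
 * pair. Hypothetical example spilling two GRFs at byte offset 64 (a
 * multiple of 16, as required) and filling them back later:
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(1), 2, 64);
 *    ...
 *    brw_oword_block_read_scratch(p, brw_vec8_grf(20, 0),
 *                                 brw_message_reg(1), 2, 64);
 */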
2156
2157 void
2158 gen7_block_read_scratch(struct brw_codegen *p,
2159 struct brw_reg dest,
2160 int num_regs,
2161 unsigned offset)
2162 {
2163 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2164 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2165
2166 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2167
2168 /* The HW requires that the header is present; this is to get the g0.5
2169 * scratch offset.
2170 */
2171 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2172
2173 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2174 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2175 * is 32 bytes, which happens to be the size of a register.
2176 */
2177 offset /= REG_SIZE;
2178 assert(offset < (1 << 12));
2179
2180 gen7_set_dp_scratch_message(p, insn,
2181 false, /* scratch read */
2182 false, /* OWords */
2183 false, /* invalidate after read */
2184 num_regs,
2185 offset,
2186 1, /* mlen: just g0 */
2187 num_regs, /* rlen */
2188 true); /* header present */
2189 }
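/* Worked example: filling 4 GRFs spilled at byte offset 8192 uses HWord
 * offset 8192 / REG_SIZE = 256, well inside the 12-bit field, which tops
 * out at 4095 HWords (just under 128KB of scratch).
 */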
2190
2191 /**
2192 * Read float[4] vectors from the data port constant cache.
2193 * Location (in buffer) should be a multiple of 16.
2194 * Used for fetching shader constants.
2195 */
2196 void brw_oword_block_read(struct brw_codegen *p,
2197 struct brw_reg dest,
2198 struct brw_reg mrf,
2199 uint32_t offset,
2200 uint32_t bind_table_index)
2201 {
2202 const struct gen_device_info *devinfo = p->devinfo;
2203 const unsigned target_cache =
2204 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2205 BRW_DATAPORT_READ_TARGET_DATA_CACHE);
2206 const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
2207
2208 /* On gen6+, the offset is in units of OWORDs (16 bytes). */
2209 if (devinfo->gen >= 6)
2210 offset /= 16;
2211
2212 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2213
2214 brw_push_insn_state(p);
2215 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2216 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2217 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2218
2219 brw_push_insn_state(p);
2220 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2221 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2222
2223 /* set message header global offset field (reg 0, element 2) */
2224 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2225 brw_MOV(p,
2226 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2227 mrf.nr,
2228 2), BRW_REGISTER_TYPE_UD),
2229 brw_imm_ud(offset));
2230 brw_pop_insn_state(p);
2231
2232 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2233
2234 /* cast dest to a uword[8] vector */
2235 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2236
2237 brw_set_dest(p, insn, dest);
2238 if (devinfo->gen >= 6) {
2239 brw_set_src0(p, insn, mrf);
2240 } else {
2241 brw_set_src0(p, insn, brw_null_reg());
2242 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2243 }
2244
2245 brw_set_dp_read_message(p, insn, bind_table_index,
2246 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2247 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2248 target_cache,
2249 1, /* msg_length */
2250 true, /* header_present */
2251 DIV_ROUND_UP(exec_size, 8)); /* response_length */
2252
2253 brw_pop_insn_state(p);
2254 }
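/* Hypothetical constant fetch: pull OWORDs from binding table entry 3 at
 * buffer offset 32 (a multiple of 16), staging the header in MRF 1:
 *
 *    brw_oword_block_read(p, brw_vec8_grf(12, 0), brw_message_reg(1),
 *                         32, 3);
 */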
2255
2256
2257 void brw_fb_WRITE(struct brw_codegen *p,
2258 struct brw_reg payload,
2259 struct brw_reg implied_header,
2260 unsigned msg_control,
2261 unsigned binding_table_index,
2262 unsigned msg_length,
2263 unsigned response_length,
2264 bool eot,
2265 bool last_render_target,
2266 bool header_present)
2267 {
2268 const struct gen_device_info *devinfo = p->devinfo;
2269 const unsigned target_cache =
2270 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2271 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2272 brw_inst *insn;
2273 unsigned msg_type;
2274 struct brw_reg dest, src0;
2275
2276 if (brw_inst_exec_size(devinfo, p->current) >= BRW_EXECUTE_16)
2277 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2278 else
2279 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2280
2281 if (devinfo->gen >= 6) {
2282 insn = next_insn(p, BRW_OPCODE_SENDC);
2283 } else {
2284 insn = next_insn(p, BRW_OPCODE_SEND);
2285 }
2286 brw_inst_set_compression(devinfo, insn, false);
2287
2288 if (devinfo->gen >= 6) {
2289 /* headerless version, just submit color payload */
2290 src0 = payload;
2291
2292 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2293 } else {
2294 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2295 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2296 src0 = implied_header;
2297
2298 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2299 }
2300
2301 brw_set_dest(p, insn, dest);
2302 brw_set_src0(p, insn, src0);
2303 brw_set_dp_write_message(p,
2304 insn,
2305 binding_table_index,
2306 msg_control,
2307 msg_type,
2308 target_cache,
2309 msg_length,
2310 header_present,
2311 last_render_target,
2312 response_length,
2313 eot,
2314 0 /* send_commit_msg */);
2315 }
2316
2317 brw_inst *
2318 gen9_fb_READ(struct brw_codegen *p,
2319 struct brw_reg dst,
2320 struct brw_reg payload,
2321 unsigned binding_table_index,
2322 unsigned msg_length,
2323 unsigned response_length,
2324 bool per_sample)
2325 {
2326 const struct gen_device_info *devinfo = p->devinfo;
2327 assert(devinfo->gen >= 9);
2328 const unsigned msg_subtype =
2329 brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16 ? 0 : 1;
2330 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2331
2332 brw_set_dest(p, insn, dst);
2333 brw_set_src0(p, insn, payload);
2334 brw_set_dp_read_message(p, insn, binding_table_index,
2335 per_sample << 5 | msg_subtype,
2336 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2337 GEN6_SFID_DATAPORT_RENDER_CACHE,
2338 msg_length, true /* header_present */,
2339 response_length);
2340 brw_inst_set_rt_slot_group(devinfo, insn,
2341 brw_inst_qtr_control(devinfo, p->current) / 2);
2342
2343 return insn;
2344 }
2345
2346 /**
2347 * Texture sample instruction.
2348 * Note: the msg_type plus msg_length values determine exactly what kind
2349 * of sampling operation is performed. See volume 4, page 161 of docs.
2350 */
2351 void brw_SAMPLE(struct brw_codegen *p,
2352 struct brw_reg dest,
2353 unsigned msg_reg_nr,
2354 struct brw_reg src0,
2355 unsigned binding_table_index,
2356 unsigned sampler,
2357 unsigned msg_type,
2358 unsigned response_length,
2359 unsigned msg_length,
2360 unsigned header_present,
2361 unsigned simd_mode,
2362 unsigned return_format)
2363 {
2364 const struct gen_device_info *devinfo = p->devinfo;
2365 brw_inst *insn;
2366
2367 if (msg_reg_nr != -1)
2368 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2369
2370 insn = next_insn(p, BRW_OPCODE_SEND);
2371 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2372
2373 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2374 *
2375 * "Instruction compression is not allowed for this instruction (that
2376 * is, send). The hardware behavior is undefined if this instruction is
2377 * set as compressed. However, compress control can be set to "SecHalf"
2378 * to affect the EMask generation."
2379 *
2380 * No similar wording is found in later PRMs, but there are examples
2381 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2382 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2383 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2384 */
2385 brw_inst_set_compression(devinfo, insn, false);
2386
2387 if (devinfo->gen < 6)
2388 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2389
2390 brw_set_dest(p, insn, dest);
2391 brw_set_src0(p, insn, src0);
2392 brw_set_sampler_message(p, insn,
2393 binding_table_index,
2394 sampler,
2395 msg_type,
2396 response_length,
2397 msg_length,
2398 header_present,
2399 simd_mode,
2400 return_format);
2401 }
2402
2403 /* Adjust the message header's sampler state pointer to
2404 * select the correct group of 16 samplers.
2405 */
2406 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2407 struct brw_reg header,
2408 struct brw_reg sampler_index)
2409 {
2410 /* The "Sampler Index" field can only store values between 0 and 15.
2411 * However, we can add an offset to the "Sampler State Pointer"
2412 * field, effectively selecting a different set of 16 samplers.
2413 *
2414 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2415 * offset, and each sampler state is only 16 bytes, so we can't
2416 * exclusively use the offset - we have to use both.
2417 */
2418
2419 const struct gen_device_info *devinfo = p->devinfo;
2420
2421 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2422 const int sampler_state_size = 16; /* 16 bytes */
2423 uint32_t sampler = sampler_index.ud;
2424
2425 if (sampler >= 16) {
2426 assert(devinfo->is_haswell || devinfo->gen >= 8);
2427 brw_ADD(p,
2428 get_element_ud(header, 3),
2429 get_element_ud(brw_vec8_grf(0, 0), 3),
2430 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2431 }
2432 } else {
2433 /* Non-const sampler array indexing case */
2434 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2435 return;
2436 }
2437
2438 struct brw_reg temp = get_element_ud(header, 3);
2439
2440 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2441 brw_SHL(p, temp, temp, brw_imm_ud(4));
2442 brw_ADD(p,
2443 get_element_ud(header, 3),
2444 get_element_ud(brw_vec8_grf(0, 0), 3),
2445 temp);
2446 }
2447 }
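/* Worked example for the immediate case: sampler_index.ud == 18 adds
 * 16 * (18 / 16) * 16 = 256 bytes to the sampler state pointer (integer
 * division), with the descriptor's 4-bit sampler field presumably left to
 * carry the remaining 18 % 16 = 2 by the caller.
 */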
2448
2449 /* All these variables are pretty confusing - we might be better off
2450 * using bitmasks and macros for this, in the old style. Or perhaps
2451 * just having the caller instantiate the fields in dword3 itself.
2452 */
2453 void brw_urb_WRITE(struct brw_codegen *p,
2454 struct brw_reg dest,
2455 unsigned msg_reg_nr,
2456 struct brw_reg src0,
2457 enum brw_urb_write_flags flags,
2458 unsigned msg_length,
2459 unsigned response_length,
2460 unsigned offset,
2461 unsigned swizzle)
2462 {
2463 const struct gen_device_info *devinfo = p->devinfo;
2464 brw_inst *insn;
2465
2466 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2467
2468 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2469 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2470 brw_push_insn_state(p);
2471 brw_set_default_access_mode(p, BRW_ALIGN_1);
2472 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2473 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2474 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2475 BRW_REGISTER_TYPE_UD),
2476 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2477 brw_imm_ud(0xff00));
2478 brw_pop_insn_state(p);
2479 }
2480
2481 insn = next_insn(p, BRW_OPCODE_SEND);
2482
2483 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2484
2485 brw_set_dest(p, insn, dest);
2486 brw_set_src0(p, insn, src0);
2487 brw_set_src1(p, insn, brw_imm_d(0));
2488
2489 if (devinfo->gen < 6)
2490 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2491
2492 brw_set_urb_message(p,
2493 insn,
2494 flags,
2495 msg_length,
2496 response_length,
2497 offset,
2498 swizzle);
2499 }
2500
2501 struct brw_inst *
2502 brw_send_indirect_message(struct brw_codegen *p,
2503 unsigned sfid,
2504 struct brw_reg dst,
2505 struct brw_reg payload,
2506 struct brw_reg desc)
2507 {
2508 const struct gen_device_info *devinfo = p->devinfo;
2509 struct brw_inst *send;
2510 int setup;
2511
2512 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2513
2514 assert(desc.type == BRW_REGISTER_TYPE_UD);
2515
2516 /* We hold on to the setup instruction (the SEND in the direct case, the OR
2517 * in the indirect case) by its index in the instruction store. The
2518 * pointer returned by next_insn() may become invalid if emitting the SEND
2519 * in the indirect case reallocs the store.
2520 */
2521
2522 if (desc.file == BRW_IMMEDIATE_VALUE) {
2523 setup = p->nr_insn;
2524 send = next_insn(p, BRW_OPCODE_SEND);
2525 brw_set_src1(p, send, desc);
2526
2527 } else {
2528 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2529
2530 brw_push_insn_state(p);
2531 brw_set_default_access_mode(p, BRW_ALIGN_1);
2532 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2533 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2534 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2535
2536 /* Load the indirect descriptor to an address register using OR so the
2537 * caller can specify additional descriptor bits with the usual
2538 * brw_set_*_message() helper functions.
2539 */
2540 setup = p->nr_insn;
2541 brw_OR(p, addr, desc, brw_imm_ud(0));
2542
2543 brw_pop_insn_state(p);
2544
2545 send = next_insn(p, BRW_OPCODE_SEND);
2546 brw_set_src1(p, send, addr);
2547 }
2548
2549 if (dst.width < BRW_EXECUTE_8)
2550 brw_inst_set_exec_size(devinfo, send, dst.width);
2551
2552 brw_set_dest(p, send, dst);
2553 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2554 brw_inst_set_sfid(devinfo, send, sfid);
2555
2556 return &p->store[setup];
2557 }
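/* Sketch of the indirect case with a register-held descriptor (names are
 * placeholders). The returned instruction is the OR, whose immediate
 * source aliases the SEND descriptor fields, so the usual setters still
 * work:
 *
 *    struct brw_reg desc = retype(brw_vec1_grf(3, 0), BRW_REGISTER_TYPE_UD);
 *    brw_inst *insn = brw_send_indirect_message(
 *       p, GEN7_SFID_DATAPORT_DATA_CACHE, dst, payload, desc);
 *    brw_inst_set_mlen(devinfo, insn, 1);
 *    brw_inst_set_rlen(devinfo, insn, 1);
 */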
2558
2559 static struct brw_inst *
2560 brw_send_indirect_surface_message(struct brw_codegen *p,
2561 unsigned sfid,
2562 struct brw_reg dst,
2563 struct brw_reg payload,
2564 struct brw_reg surface,
2565 unsigned message_len,
2566 unsigned response_len,
2567 bool header_present)
2568 {
2569 const struct gen_device_info *devinfo = p->devinfo;
2570 struct brw_inst *insn;
2571
2572 if (surface.file != BRW_IMMEDIATE_VALUE) {
2573 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2574
2575 brw_push_insn_state(p);
2576 brw_set_default_access_mode(p, BRW_ALIGN_1);
2577 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2578 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2579 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2580
2581 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2582 * some surface array is accessed out of bounds.
2583 */
2584 insn = brw_AND(p, addr,
2585 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2586 BRW_GET_SWZ(surface.swizzle, 0)),
2587 brw_imm_ud(0xff));
2588
2589 brw_pop_insn_state(p);
2590
2591 surface = addr;
2592 }
2593
2594 insn = brw_send_indirect_message(p, sfid, dst, payload, surface);
2595 brw_inst_set_mlen(devinfo, insn, message_len);
2596 brw_inst_set_rlen(devinfo, insn, response_len);
2597 brw_inst_set_header_present(devinfo, insn, header_present);
2598
2599 return insn;
2600 }
2601
2602 static bool
2603 while_jumps_before_offset(const struct gen_device_info *devinfo,
2604 brw_inst *insn, int while_offset, int start_offset)
2605 {
2606 int scale = 16 / brw_jump_scale(devinfo);
2607 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2608 : brw_inst_jip(devinfo, insn);
2609 assert(jip < 0);
2610 return while_offset + jip * scale <= start_offset;
2611 }
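/* E.g. on gen7 brw_jump_scale() is 2, so scale is 16 / 2 = 8 bytes per
 * JIP unit: a WHILE with JIP == -10 jumps back 80 bytes, i.e. five
 * uncompacted instructions.
 */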
2612
2613
2614 static int
2615 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2616 {
2617 int offset;
2618 void *store = p->store;
2619 const struct gen_device_info *devinfo = p->devinfo;
2620
2621 int depth = 0;
2622
2623 for (offset = next_offset(devinfo, store, start_offset);
2624 offset < p->next_insn_offset;
2625 offset = next_offset(devinfo, store, offset)) {
2626 brw_inst *insn = store + offset;
2627
2628 switch (brw_inst_opcode(devinfo, insn)) {
2629 case BRW_OPCODE_IF:
2630 depth++;
2631 break;
2632 case BRW_OPCODE_ENDIF:
2633 if (depth == 0)
2634 return offset;
2635 depth--;
2636 break;
2637 case BRW_OPCODE_WHILE:
2638 /* If the while doesn't jump before our instruction, it's the end
2639 * of a sibling do...while loop. Ignore it.
2640 */
2641 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2642 continue;
2643 /* fallthrough */
2644 case BRW_OPCODE_ELSE:
2645 case BRW_OPCODE_HALT:
2646 if (depth == 0)
2647 return offset;
2648 }
2649 }
2650
2651 return 0;
2652 }
2653
2654 /* There is no DO instruction on gen6, so to find the end of the loop
2655 * we have to see if the loop is jumping back before our start
2656 * instruction.
2657 */
2658 static int
2659 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2660 {
2661 const struct gen_device_info *devinfo = p->devinfo;
2662 int offset;
2663 void *store = p->store;
2664
2665 assert(devinfo->gen >= 6);
2666
2667 /* Always start after the instruction (such as a WHILE) we're trying to fix
2668 * up.
2669 */
2670 for (offset = next_offset(devinfo, store, start_offset);
2671 offset < p->next_insn_offset;
2672 offset = next_offset(devinfo, store, offset)) {
2673 brw_inst *insn = store + offset;
2674
2675 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2676 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2677 return offset;
2678 }
2679 }
2680 assert(!"not reached");
2681 return start_offset;
2682 }
2683
2684 /* After program generation, go back and update the UIP and JIP of
2685 * BREAK, CONT, and HALT instructions to their correct locations.
2686 */
2687 void
2688 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2689 {
2690 const struct gen_device_info *devinfo = p->devinfo;
2691 int offset;
2692 int br = brw_jump_scale(devinfo);
2693 int scale = 16 / br;
2694 void *store = p->store;
2695
2696 if (devinfo->gen < 6)
2697 return;
2698
2699 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2700 brw_inst *insn = store + offset;
2701 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2702
2703 int block_end_offset = brw_find_next_block_end(p, offset);
2704 switch (brw_inst_opcode(devinfo, insn)) {
2705 case BRW_OPCODE_BREAK:
2706 assert(block_end_offset != 0);
2707 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2708 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2709 brw_inst_set_uip(devinfo, insn,
2710 (brw_find_loop_end(p, offset) - offset +
2711 (devinfo->gen == 6 ? 16 : 0)) / scale);
2712 break;
2713 case BRW_OPCODE_CONTINUE:
2714 assert(block_end_offset != 0);
2715 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2716 brw_inst_set_uip(devinfo, insn,
2717 (brw_find_loop_end(p, offset) - offset) / scale);
2718
2719 assert(brw_inst_uip(devinfo, insn) != 0);
2720 assert(brw_inst_jip(devinfo, insn) != 0);
2721 break;
2722
2723 case BRW_OPCODE_ENDIF: {
2724 int32_t jump = (block_end_offset == 0) ?
2725 1 * br : (block_end_offset - offset) / scale;
2726 if (devinfo->gen >= 7)
2727 brw_inst_set_jip(devinfo, insn, jump);
2728 else
2729 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2730 break;
2731 }
2732
2733 case BRW_OPCODE_HALT:
2734 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2735 *
2736 * "In case of the halt instruction not inside any conditional
2737 * code block, the value of <JIP> and <UIP> should be the
2738 * same. In case of the halt instruction inside conditional code
2739 * block, the <UIP> should be the end of the program, and the
2740 * <JIP> should be end of the most inner conditional code block."
2741 *
2742 * The uip will have already been set by whoever set up the
2743 * instruction.
2744 */
2745 if (block_end_offset == 0) {
2746 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2747 } else {
2748 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2749 }
2750 assert(brw_inst_uip(devinfo, insn) != 0);
2751 assert(brw_inst_jip(devinfo, insn) != 0);
2752 break;
2753 }
2754 }
2755 }
2756
2757 void brw_ff_sync(struct brw_codegen *p,
2758 struct brw_reg dest,
2759 unsigned msg_reg_nr,
2760 struct brw_reg src0,
2761 bool allocate,
2762 unsigned response_length,
2763 bool eot)
2764 {
2765 const struct gen_device_info *devinfo = p->devinfo;
2766 brw_inst *insn;
2767
2768 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2769
2770 insn = next_insn(p, BRW_OPCODE_SEND);
2771 brw_set_dest(p, insn, dest);
2772 brw_set_src0(p, insn, src0);
2773 brw_set_src1(p, insn, brw_imm_d(0));
2774
2775 if (devinfo->gen < 6)
2776 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2777
2778 brw_set_ff_sync_message(p,
2779 insn,
2780 allocate,
2781 response_length,
2782 eot);
2783 }
2784
2785 /**
2786 * Emit the SEND instruction necessary to generate stream output data on Gen6
2787 * (for transform feedback).
2788 *
2789 * If send_commit_msg is true, this is the last piece of stream output data
2790 * from this thread, so send the data as a committed write. According to the
2791 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2792 *
2793 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2794 * writes are complete by sending the final write as a committed write."
2795 */
2796 void
2797 brw_svb_write(struct brw_codegen *p,
2798 struct brw_reg dest,
2799 unsigned msg_reg_nr,
2800 struct brw_reg src0,
2801 unsigned binding_table_index,
2802 bool send_commit_msg)
2803 {
2804 const struct gen_device_info *devinfo = p->devinfo;
2805 const unsigned target_cache =
2806 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2807 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2808 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2809 brw_inst *insn;
2810
2811 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2812
2813 insn = next_insn(p, BRW_OPCODE_SEND);
2814 brw_set_dest(p, insn, dest);
2815 brw_set_src0(p, insn, src0);
2816 brw_set_src1(p, insn, brw_imm_d(0));
2817 brw_set_dp_write_message(p, insn,
2818 binding_table_index,
2819 0, /* msg_control: ignored */
2820 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2821 target_cache,
2822 1, /* msg_length */
2823 true, /* header_present */
2824 0, /* last_render_target: ignored */
2825 send_commit_msg, /* response_length */
2826 0, /* end_of_thread */
2827 send_commit_msg); /* send_commit_msg */
2828 }
2829
2830 static unsigned
2831 brw_surface_payload_size(struct brw_codegen *p,
2832 unsigned num_channels,
2833 bool has_simd4x2,
2834 bool has_simd16)
2835 {
2836 if (has_simd4x2 &&
2837 brw_inst_access_mode(p->devinfo, p->current) == BRW_ALIGN_16)
2838 return 1;
2839 else if (has_simd16 &&
2840 brw_inst_exec_size(p->devinfo, p->current) == BRW_EXECUTE_16)
2841 return 2 * num_channels;
2842 else
2843 return num_channels;
2844 }
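/* E.g. an Align1 SIMD16 untyped read of num_channels == 4 returns
 * 2 * 4 = 8 GRFs, while the same read in SIMD8 returns 4.
 */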
2845
2846 static void
2847 brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
2848 brw_inst *insn,
2849 unsigned atomic_op,
2850 bool response_expected)
2851 {
2852 const struct gen_device_info *devinfo = p->devinfo;
2853 unsigned msg_control =
2854 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2855 (response_expected ? 1 << 5 : 0); /* Return data expected */
2856
2857 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2858 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2859 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2860 msg_control |= 1 << 4; /* SIMD8 mode */
2861
2862 brw_inst_set_dp_msg_type(devinfo, insn,
2863 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2864 } else {
2865 brw_inst_set_dp_msg_type(devinfo, insn,
2866 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2867 }
2868 } else {
2869 brw_inst_set_dp_msg_type(devinfo, insn,
2870 GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2871
2872 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2873 msg_control |= 1 << 4; /* SIMD8 mode */
2874 }
2875
2876 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2877 }
2878
2879 void
2880 brw_untyped_atomic(struct brw_codegen *p,
2881 struct brw_reg dst,
2882 struct brw_reg payload,
2883 struct brw_reg surface,
2884 unsigned atomic_op,
2885 unsigned msg_length,
2886 bool response_expected)
2887 {
2888 const struct gen_device_info *devinfo = p->devinfo;
2889 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2890 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2891 GEN7_SFID_DATAPORT_DATA_CACHE);
2892 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
2893 /* Mask out unused components -- This is especially important in Align16
2894 * mode on generations that don't have native support for SIMD4x2 atomics,
2895 * because unused but enabled components will cause the dataport to perform
2896 * additional atomic operations on the addresses that happen to be in the
2897 * uninitialized Y, Z and W coordinates of the payload.
2898 */
2899 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2900 struct brw_inst *insn = brw_send_indirect_surface_message(
2901 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
2902 brw_surface_payload_size(p, response_expected,
2903 devinfo->gen >= 8 || devinfo->is_haswell, true),
2904 align1);
2905
2906 brw_set_dp_untyped_atomic_message(
2907 p, insn, atomic_op, response_expected);
2908 }
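/* Hypothetical SIMD8 atomic add with the previous value returned (surface
 * index and register numbers are made up; mlen 2 covers one address GRF
 * plus one source GRF):
 *
 *    brw_untyped_atomic(p, brw_vec8_grf(30, 0), brw_vec8_grf(40, 0),
 *                       brw_imm_ud(5), BRW_AOP_ADD, 2, true);
 */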
2909
2910 static void
2911 brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
2912 struct brw_inst *insn,
2913 unsigned num_channels)
2914 {
2915 const struct gen_device_info *devinfo = p->devinfo;
2916 /* Set mask of 32-bit channels to drop. */
2917 unsigned msg_control = 0xf & (0xf << num_channels);
2918
2919 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2920 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
2921 msg_control |= 1 << 4; /* SIMD16 mode */
2922 else
2923 msg_control |= 2 << 4; /* SIMD8 mode */
2924 }
2925
2926 brw_inst_set_dp_msg_type(devinfo, insn,
2927 (devinfo->gen >= 8 || devinfo->is_haswell ?
2928 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2929 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
2930 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2931 }
2932
2933 void
2934 brw_untyped_surface_read(struct brw_codegen *p,
2935 struct brw_reg dst,
2936 struct brw_reg payload,
2937 struct brw_reg surface,
2938 unsigned msg_length,
2939 unsigned num_channels)
2940 {
2941 const struct gen_device_info *devinfo = p->devinfo;
2942 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2943 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2944 GEN7_SFID_DATAPORT_DATA_CACHE);
2945 struct brw_inst *insn = brw_send_indirect_surface_message(
2946 p, sfid, dst, payload, surface, msg_length,
2947 brw_surface_payload_size(p, num_channels, true, true),
2948 false);
2949
2950 brw_set_dp_untyped_surface_read_message(
2951 p, insn, num_channels);
2952 }
2953
2954 static void
2955 brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
2956 struct brw_inst *insn,
2957 unsigned num_channels)
2958 {
2959 const struct gen_device_info *devinfo = p->devinfo;
2960 /* Set mask of 32-bit channels to drop. */
2961 unsigned msg_control = 0xf & (0xf << num_channels);
2962
2963 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2964 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
2965 msg_control |= 1 << 4; /* SIMD16 mode */
2966 else
2967 msg_control |= 2 << 4; /* SIMD8 mode */
2968 } else {
2969 if (devinfo->gen >= 8 || devinfo->is_haswell)
2970 msg_control |= 0 << 4; /* SIMD4x2 mode */
2971 else
2972 msg_control |= 2 << 4; /* SIMD8 mode */
2973 }
2974
2975 brw_inst_set_dp_msg_type(devinfo, insn,
2976 devinfo->gen >= 8 || devinfo->is_haswell ?
2977 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
2978 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
2979 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2980 }
2981
2982 void
2983 brw_untyped_surface_write(struct brw_codegen *p,
2984 struct brw_reg payload,
2985 struct brw_reg surface,
2986 unsigned msg_length,
2987 unsigned num_channels)
2988 {
2989 const struct gen_device_info *devinfo = p->devinfo;
2990 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2991 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2992 GEN7_SFID_DATAPORT_DATA_CACHE);
2993 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
2994 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2995 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
2996 WRITEMASK_X : WRITEMASK_XYZW;
2997 struct brw_inst *insn = brw_send_indirect_surface_message(
2998 p, sfid, brw_writemask(brw_null_reg(), mask),
2999 payload, surface, msg_length, 0, align1);
3000
3001 brw_set_dp_untyped_surface_write_message(
3002 p, insn, num_channels);
3003 }
3004
3005 static unsigned
3006 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
3007 {
3008 switch (bit_size) {
3009 case 8:
3010 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
3011 case 16:
3012 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
3013 case 32:
3014 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
3015 default:
3016 unreachable("Unsupported bit_size for byte scattered messages");
3017 }
3018 }
3019
3020
3021 void
3022 brw_byte_scattered_read(struct brw_codegen *p,
3023 struct brw_reg dst,
3024 struct brw_reg payload,
3025 struct brw_reg surface,
3026 unsigned msg_length,
3027 unsigned bit_size)
3028 {
3029 const struct gen_device_info *devinfo = p->devinfo;
3030 assert(devinfo->gen > 7 || devinfo->is_haswell);
3031 assert(brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3032 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
3033
3034 struct brw_inst *insn = brw_send_indirect_surface_message(
3035 p, sfid, dst, payload, surface, msg_length,
3036 brw_surface_payload_size(p, 1, true, true),
3037 false);
3038
3039 unsigned msg_control =
3040 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3041
3042 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
3043 msg_control |= 1; /* SIMD16 mode */
3044 else
3045 msg_control |= 0; /* SIMD8 mode */
3046
3047 brw_inst_set_dp_msg_type(devinfo, insn,
3048 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
3049 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3050 }
3051
3052 void
3053 brw_byte_scattered_write(struct brw_codegen *p,
3054 struct brw_reg payload,
3055 struct brw_reg surface,
3056 unsigned msg_length,
3057 unsigned bit_size)
3058 {
3059 const struct gen_device_info *devinfo = p->devinfo;
3060 assert(devinfo->gen > 7 || devinfo->is_haswell);
3061 assert(brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3062 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
3063
3064 struct brw_inst *insn = brw_send_indirect_surface_message(
3065 p, sfid, brw_writemask(brw_null_reg(), WRITEMASK_XYZW),
3066 payload, surface, msg_length, 0, true);
3067
3068 unsigned msg_control =
3069 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3070
3071 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
3072 msg_control |= 1;
3073 else
3074 msg_control |= 0;
3075
3076 brw_inst_set_dp_msg_type(devinfo, insn,
3077 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
3078 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3079 }
3080
3081 static void
3082 brw_set_dp_typed_atomic_message(struct brw_codegen *p,
3083 struct brw_inst *insn,
3084 unsigned atomic_op,
3085 bool response_expected)
3086 {
3087 const struct gen_device_info *devinfo = p->devinfo;
3088 unsigned msg_control =
3089 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3090 (response_expected ? 1 << 5 : 0); /* Return data expected */
3091
3092 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3093 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3094 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3095 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3096
3097 brw_inst_set_dp_msg_type(devinfo, insn,
3098 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
3099 } else {
3100 brw_inst_set_dp_msg_type(devinfo, insn,
3101 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
3102 }
3103
3104 } else {
3105 brw_inst_set_dp_msg_type(devinfo, insn,
3106 GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);
3107
3108 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3109 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3110 }
3111
3112 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3113 }
3114
3115 void
3116 brw_typed_atomic(struct brw_codegen *p,
3117 struct brw_reg dst,
3118 struct brw_reg payload,
3119 struct brw_reg surface,
3120 unsigned atomic_op,
3121 unsigned msg_length,
3122 bool response_expected) {
3123 const struct gen_device_info *devinfo = p->devinfo;
3124 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3125 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3126 GEN6_SFID_DATAPORT_RENDER_CACHE);
3127 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3128 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3129 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3130 struct brw_inst *insn = brw_send_indirect_surface_message(
3131 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
3132 brw_surface_payload_size(p, response_expected,
3133 devinfo->gen >= 8 || devinfo->is_haswell, false),
3134 true);
3135
3136 brw_set_dp_typed_atomic_message(
3137 p, insn, atomic_op, response_expected);
3138 }
3139
3140 static void
3141 brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
3142 struct brw_inst *insn,
3143 unsigned num_channels)
3144 {
3145 const struct gen_device_info *devinfo = p->devinfo;
3146 /* Set mask of unused channels. */
3147 unsigned msg_control = 0xf & (0xf << num_channels);
3148
3149 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3150 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3151 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3152 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3153 else
3154 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3155 }
3156
3157 brw_inst_set_dp_msg_type(devinfo, insn,
3158 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
3159 } else {
3160 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3161 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3162 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3163 }
3164
3165 brw_inst_set_dp_msg_type(devinfo, insn,
3166 GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
3167 }
3168
3169 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3170 }
3171
3172 void
3173 brw_typed_surface_read(struct brw_codegen *p,
3174 struct brw_reg dst,
3175 struct brw_reg payload,
3176 struct brw_reg surface,
3177 unsigned msg_length,
3178 unsigned num_channels)
3179 {
3180 const struct gen_device_info *devinfo = p->devinfo;
3181 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3182 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3183 GEN6_SFID_DATAPORT_RENDER_CACHE);
3184 struct brw_inst *insn = brw_send_indirect_surface_message(
3185 p, sfid, dst, payload, surface, msg_length,
3186 brw_surface_payload_size(p, num_channels,
3187 devinfo->gen >= 8 || devinfo->is_haswell, false),
3188 true);
3189
3190 brw_set_dp_typed_surface_read_message(
3191 p, insn, num_channels);
3192 }
3193
3194 static void
3195 brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
3196 struct brw_inst *insn,
3197 unsigned num_channels)
3198 {
3199 const struct gen_device_info *devinfo = p->devinfo;
3200 /* Set mask of unused channels. */
3201 unsigned msg_control = 0xf & (0xf << num_channels);
3202
3203 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3204 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3205 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3206 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3207 else
3208 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3209 }
3210
3211 brw_inst_set_dp_msg_type(devinfo, insn,
3212 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);
3213
3214 } else {
3215 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3216 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3217 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3218 }
3219
3220 brw_inst_set_dp_msg_type(devinfo, insn,
3221 GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
3222 }
3223
3224 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3225 }
3226
3227 void
3228 brw_typed_surface_write(struct brw_codegen *p,
3229 struct brw_reg payload,
3230 struct brw_reg surface,
3231 unsigned msg_length,
3232 unsigned num_channels)
3233 {
3234 const struct gen_device_info *devinfo = p->devinfo;
3235 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3236 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3237 GEN6_SFID_DATAPORT_RENDER_CACHE);
3238 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3239 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3240 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3241 WRITEMASK_X : WRITEMASK_XYZW);
3242 struct brw_inst *insn = brw_send_indirect_surface_message(
3243 p, sfid, brw_writemask(brw_null_reg(), mask),
3244 payload, surface, msg_length, 0, true);
3245
3246 brw_set_dp_typed_surface_write_message(
3247 p, insn, num_channels);
3248 }
3249
3250 static void
3251 brw_set_memory_fence_message(struct brw_codegen *p,
3252 struct brw_inst *insn,
3253 enum brw_message_target sfid,
3254 bool commit_enable)
3255 {
3256 const struct gen_device_info *devinfo = p->devinfo;
3257
3258 brw_set_message_descriptor(p, insn, sfid,
3259 1 /* message length */,
3260 (commit_enable ? 1 : 0) /* response length */,
3261 true /* header present */,
3262 false);
3263
3264 switch (sfid) {
3265 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3266 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3267 break;
3268 case GEN7_SFID_DATAPORT_DATA_CACHE:
3269 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3270 break;
3271 default:
3272 unreachable("Not reached");
3273 }
3274
3275 if (commit_enable)
3276 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3277 }
3278
3279 void
3280 brw_memory_fence(struct brw_codegen *p,
3281 struct brw_reg dst)
3282 {
3283 const struct gen_device_info *devinfo = p->devinfo;
3284 const bool commit_enable = devinfo->gen == 7 && !devinfo->is_haswell;
3285 struct brw_inst *insn;
3286
3287 brw_push_insn_state(p);
3288 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3289 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3290 dst = vec1(dst);
3291
3292 /* Set dst as the destination for dependency tracking; the MEMORY_FENCE
3293 * message doesn't write anything back.
3294 */
3295 insn = next_insn(p, BRW_OPCODE_SEND);
3296 dst = retype(dst, BRW_REGISTER_TYPE_UW);
3297 brw_set_dest(p, insn, dst);
3298 brw_set_src0(p, insn, dst);
3299 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3300 commit_enable);
3301
3302 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3303 /* IVB does typed surface access through the render cache, so we need to
3304 * flush it too. Use a different register so both flushes can be
3305 * pipelined by the hardware.
3306 */
3307 insn = next_insn(p, BRW_OPCODE_SEND);
3308 brw_set_dest(p, insn, offset(dst, 1));
3309 brw_set_src0(p, insn, offset(dst, 1));
3310 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3311 commit_enable);
3312
3313 /* Now write the response of the second message into the response of the
3314 * first to trigger a pipeline stall -- This way future render and data
3315 * cache messages will be properly ordered with respect to past data and
3316 * render cache messages.
3317 */
3318 brw_MOV(p, dst, offset(dst, 1));
3319 }
3320
3321 brw_pop_insn_state(p);
3322 }
3323
void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;
   const uint16_t exec_size = brw_inst_exec_size(devinfo, p->current);

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually an immediate.
    */
   insn = brw_send_indirect_message(p,
                                    GEN7_SFID_PIXEL_INTERPOLATOR,
                                    dest,
                                    mrf,
                                    vec1(data));
   brw_inst_set_mlen(devinfo, insn, msg_length);
   brw_inst_set_rlen(devinfo, insn, response_length);

   brw_inst_set_pi_simd_mode(devinfo, insn, exec_size == BRW_EXECUTE_16);
   brw_inst_set_pi_slot_group(devinfo, insn, 0); /* zero unless 32/64px dispatch */
   brw_inst_set_pi_nopersp(devinfo, insn, noperspective);
   brw_inst_set_pi_message_type(devinfo, insn, mode);
}

void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
   const unsigned qtr_control = brw_inst_qtr_control(devinfo, p->current);
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just
          * find the first bit set in the execution mask.  The register
          * exists on HSW already but it reads back as all ones when the
          * current instruction has execution masking disabled, so it's
          * kind of useless.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's not
             * tightly packed (i.e. it doesn't have the form '2^n - 1' for
             * some n).  Combine ce0 with the given dispatch (or vector)
             * mask to mask off those channels which were never dispatched
             * by the hardware.
             */
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }
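         /* E.g. with qtr_control == 1 (second quarter) the dispatch mask is
          * shifted down by 8 bits so that it lines up with the quarter of
          * channels that ce0 reports on (see the note below about quarter
          * control).
          */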

         /* Quarter control has the effect of magically shifting the value
          * of ce0 so you'll get the first active channel relative to the
          * specified quarter control as result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
      } else {
         const struct brw_reg flag = brw_flag_reg(1, 0);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking
          * and a conditional modifier enabled in order to get the full
          * execution mask in f1.0.  We could use a single 32-wide move here
          * if it weren't for the hardware bug that causes channel enables
          * to be applied incorrectly to the second half of 32-wide
          * instructions on Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_flag_reg_nr(devinfo, inst, 1);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }
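         /* Each MOV writes zero with the Z conditional modifier, which
          * evaluates to true for every enabled channel, so after the loop
          * f1.0 holds a set bit for each executing channel of the group.
          */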

         /* Find the first bit set in the exec_size-wide portion of the
          * flag register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.  Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination first without and then with execution
          * masking in order to find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
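      /* The <0;1,0> region below replicates the selected scalar component
       * across every channel of the MOV.
       */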
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset.  The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address.  Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));
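         /* For a dword source with unit stride (encoded hstride == 1), for
          * instance, this shifts idx left by log2(4) + 1 - 1 == 2, i.e. the
          * address register ends up holding the byte offset idx * 4.
          */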

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, so account for the difference if the
          * source register is above this limit.
          */
         if (offset >= limit) {
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation
             *    is integer DWord multiply, indirect addressing must not
             *    be used."
             *
             * To work around this restriction, we do two integer MOVs
             * instead of one 64-bit MOV.  Because no double value should
             * ever cross a register boundary, it's safe to use the
             * immediate offset in the indirect here to handle adding 4
             * bytes to the offset and avoid the extra ADD to the register
             * file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_vec1_indirect(addr.subnr, offset),
                           BRW_REGISTER_TYPE_D));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_vec1_indirect(addr.subnr, offset + 4),
                           BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can be either zero or one; replicate
          * it to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we want to just
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const unsigned sfid = (p->devinfo->gen >= 8 || p->devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(p->devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the
    * given offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_src1(p, send, brw_imm_ud(0));
   brw_set_message_descriptor(p, send, sfid, 2, 0, false, false);
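   /* Two-register payload with no response, no header and no EOT. */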
   brw_inst_set_binding_table_index(p->devinfo, send, surf_index);
   brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);

   brw_pop_insn_state(p);
}


/**
 * Emit the SEND message for a barrier.
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());

   brw_set_message_descriptor(p, inst, BRW_SFID_MESSAGE_GATEWAY,
                              1 /* msg_length */,
                              0 /* response_length */,
                              false /* header_present */,
                              false /* end_of_thread */);

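   /* Request a notification from the gateway so that the matching wait
    * instruction (see brw_WAIT() below) can block on the notification
    * register until every thread in the group has reached the barrier.
    */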
   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier.
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}

/**
 * Changes the floating-point rounding mode by updating the rounding mode
 * field (bits 5:4 of cr0.0) in the control register.  Using bitwise
 * operations, this function supports switching among RTNE (00), RU (01),
 * RD (10) and RTZ (11) rounding.  Only RTNE and RTZ rounding are enabled
 * at the NIR level.
 */
void
brw_rounding_mode(struct brw_codegen *p,
                  enum brw_rnd_mode mode)
{
   const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;

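   /* For BRW_RND_MODE_RTZ (0x3), for instance, bits equals
    * BRW_CR0_RND_MODE_MASK, so the AND below can be skipped entirely:
    * the OR sets both bits of the field on its own.
    */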
   if (bits != BRW_CR0_RND_MODE_MASK) {
      brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                               brw_imm_ud(~BRW_CR0_RND_MODE_MASK));

      /* From the Skylake PRM, Volume 7, page 760:
       *
       *    "Implementation Restriction on Register Access: When the control
       *     register is used as an explicit source and/or destination,
       *     hardware does not ensure execution pipeline coherency.  Software
       *     must set the thread control field to ‘switch’ for an instruction
       *     that uses control register as an explicit operand."
       */
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }

   if (bits) {
      brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                              brw_imm_ud(bits));
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }
}