i965: Rename brw_inst 3src functions in preparation for align1
[mesa.git] src/intel/compiler/brw_eu_emit.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
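/* A hypothetical usage sketch (not from the original file): a generator
 * emitting a gen6+ SEND from a GRF payload would first resolve the implied
 * move, then emit the SEND from the resulting message register:
 *
 *    struct brw_reg payload = brw_vec8_grf(2, 0);  // assumed payload GRF
 *    gen6_resolve_implied_move(p, &payload, 1);    // payload now aliases m1
 *    // ... emit SEND with payload as src0 ...
 */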

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
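/* A worked example of the remapping above (a sketch; assumes
 * GEN7_MRF_HACK_START is 112, as the R112-R127 quote suggests): on gen7+,
 * message register m4 is rewritten to general register g116:
 *
 *    struct brw_reg r = brw_message_reg(4);
 *    gen7_convert_mrf_to_grf(p, &r);
 *    // r.file == BRW_GENERAL_REGISTER_FILE, r.nr == 112 + 4
 */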

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          *    Although Dst.HorzStride is a don't care for Align16, HW needs
          *    this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Although this is ignored in align16 mode, it still needs to be
          * programmed as '01'.
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, we automatically reduce it to match the register size.
    *
    * In platforms that support fp64 we can emit instructions with a width of
    * 4 that need two SIMD8 registers and an exec_size of 8 or 16. In these
    * cases we need to make sure that these instructions have their exec sizes
    * set properly when they are emitted and we can't rely on this code to fix
    * it.
    */
   bool fix_exec_size;
   if (devinfo->gen >= 6)
      fix_exec_size = dest.width < BRW_EXECUTE_4;
   else
      fix_exec_size = dest.width < BRW_EXECUTE_8;

   if (fix_exec_size)
      brw_inst_set_exec_size(devinfo, inst, dest.width);
}
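/* An illustrative sketch of the exec-size fixup above (hypothetical values):
 * on gen6+, writing a scalar destination narrows the execution size to match
 * the register, overriding the generator's SIMD8/SIMD16 default:
 *
 *    brw_inst *mov = brw_MOV(p, brw_vec1_grf(10, 0), brw_imm_f(1.0f));
 *    // dest.width == BRW_WIDTH_1 < BRW_EXECUTE_4, so brw_set_dest()
 *    // reduces the MOV's exec_size to BRW_EXECUTE_1.
 */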

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
            BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
            BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
            BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
            BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of using the same register descriptions
             * for align16 as for align1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
            BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
            BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
            BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
            BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of using the same register descriptions
             * for align16 as for align1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Set the Message Descriptor and Extended Message Descriptor fields
 * for SEND messages.
 *
 * \note This zeroes out the Function Control bits, so it must be called
 *       \b before filling out any message-specific data.  Callers can
 *       choose not to fill in irrelevant bits; they will be zero.
 */
void
brw_set_message_descriptor(struct brw_codegen *p,
                           brw_inst *inst,
                           enum brw_message_target sfid,
                           unsigned msg_length,
                           unsigned response_length,
                           bool header_present,
                           bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_src1(p, inst, brw_imm_d(0));

   /* For indirect sends, `inst` will not be the SEND/SENDC instruction
    * itself; instead, it will be a MOV/OR into the address register.
    *
    * In this case, we avoid setting the extended message descriptor bits,
    * since they go on the later SEND/SENDC instead and if set here would
    * instead clobber the conditionalmod bits.
    */
   unsigned opcode = brw_inst_opcode(devinfo, inst);
   if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC) {
      brw_inst_set_sfid(devinfo, inst, sfid);
   }

   brw_inst_set_mlen(devinfo, inst, msg_length);
   brw_inst_set_rlen(devinfo, inst, response_length);
   brw_inst_set_eot(devinfo, inst, end_of_thread);

   if (devinfo->gen >= 5) {
      brw_inst_set_header_present(devinfo, inst, header_present);
   }
}
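/* A hypothetical usage sketch: the math-message helper below follows this
 * exact pattern -- set the shared descriptor fields first, then fill in the
 * message-specific Function Control bits:
 *
 *    brw_set_message_descriptor(p, inst, BRW_SFID_MATH,
 *                               1, 1, false, false);   // mlen=1, rlen=1
 *    brw_inst_set_math_msg_function(devinfo, inst, BRW_MATH_FUNCTION_INV);
 */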

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }


   brw_set_message_descriptor(p, inst, BRW_SFID_MATH,
                              msg_length, response_length, false, false);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              1, response_length, true, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              msg_length, response_length, true,
                              flags & BRW_URB_WRITE_EOT);

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
         !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

void
brw_set_dp_write_message(struct brw_codegen *p,
                         brw_inst *insn,
                         unsigned binding_table_index,
                         unsigned msg_control,
                         unsigned msg_type,
                         unsigned target_cache,
                         unsigned msg_length,
                         bool header_present,
                         unsigned last_render_target,
                         unsigned response_length,
                         unsigned end_of_thread,
                         unsigned send_commit_msg)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_WRITE);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, end_of_thread);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_write_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_write_msg_control(devinfo, insn, msg_control);
   brw_inst_set_rt_last(devinfo, insn, last_render_target);
   if (devinfo->gen < 7) {
      brw_inst_set_dp_write_commit(devinfo, insn, send_commit_msg);
   }
}

void
brw_set_dp_read_message(struct brw_codegen *p,
                        brw_inst *insn,
                        unsigned binding_table_index,
                        unsigned msg_control,
                        unsigned msg_type,
                        unsigned target_cache,
                        unsigned msg_length,
                        bool header_present,
                        unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_READ);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, false);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_read_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_read_msg_control(devinfo, insn, msg_control);
   if (devinfo->gen < 6)
      brw_inst_set_dp_read_target_cache(devinfo, insn, target_cache);
}

void
brw_set_sampler_message(struct brw_codegen *p,
                        brw_inst *inst,
                        unsigned binding_table_index,
                        unsigned sampler,
                        unsigned msg_type,
                        unsigned response_length,
                        unsigned msg_length,
                        unsigned header_present,
                        unsigned simd_mode,
                        unsigned return_format)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER, msg_length,
                              response_length, header_present, false);

   brw_inst_set_binding_table_index(devinfo, inst, binding_table_index);
   brw_inst_set_sampler(devinfo, inst, sampler);
   brw_inst_set_sampler_msg_type(devinfo, inst, msg_type);
   if (devinfo->gen >= 5) {
      brw_inst_set_sampler_simd_mode(devinfo, inst, simd_mode);
   } else if (devinfo->gen == 4 && !devinfo->is_g4x) {
      brw_inst_set_sampler_return_format(devinfo, inst, return_format);
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_message_descriptor(p, inst, GEN7_SFID_DATAPORT_DATA_CACHE,
                              mlen, rlen, header_present, false);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];
   memcpy(insn, p->current, sizeof(*insn));

   brw_inst_set_opcode(devinfo, insn, opcode);
   return insn;
}

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
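/* A quick worked example of the unit conversion above (illustrative only):
 * a 32-bit value at byte subregister offset 8 is component 2 of its
 * register, so get_3src_subreg_nr() returns 8 / 4 = 2, which is what the
 * 3-src encoding stores.
 */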

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16);

   assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
          dest.file == BRW_MESSAGE_REGISTER_FILE);
   assert(dest.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(dest.type == BRW_REGISTER_TYPE_F ||
          dest.type == BRW_REGISTER_TYPE_DF ||
          dest.type == BRW_REGISTER_TYPE_D ||
          dest.type == BRW_REGISTER_TYPE_UD);
   if (devinfo->gen == 6) {
      brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                         dest.file == BRW_MESSAGE_REGISTER_FILE);
   }
   brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
   brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
   brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

   assert(src0.file == BRW_GENERAL_REGISTER_FILE);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.nr < 128);
   brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
   brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
   brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
   brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
   brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
   brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                       src0.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src1.file == BRW_GENERAL_REGISTER_FILE);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.nr < 128);
   brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
   brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
   brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
   brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
   brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
   brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                       src1.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src2.file == BRW_GENERAL_REGISTER_FILE);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.nr < 128);
   brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
   brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
   brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
   brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
   brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
   brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                       src2.vstride == BRW_VERTICAL_STRIDE_0);

   if (devinfo->gen >= 7) {
      /* Set both the source and destination types based on dest.type,
       * ignoring the source register types.  The MAD and LRP emitters ensure
       * that all four types are float.  The BFE and BFI2 emitters, however,
       * may send us mixed D and UD types and want us to ignore that and use
       * the destination type.
       */
      switch (dest.type) {
      case BRW_REGISTER_TYPE_F:
         brw_inst_set_3src_a16_src_type(devinfo, inst, BRW_3SRC_TYPE_F);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, BRW_3SRC_TYPE_F);
         break;
      case BRW_REGISTER_TYPE_DF:
         brw_inst_set_3src_a16_src_type(devinfo, inst, BRW_3SRC_TYPE_DF);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, BRW_3SRC_TYPE_DF);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_inst_set_3src_a16_src_type(devinfo, inst, BRW_3SRC_TYPE_D);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, BRW_3SRC_TYPE_D);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_inst_set_3src_a16_src_type(devinfo, inst, BRW_3SRC_TYPE_UD);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, BRW_3SRC_TYPE_UD);
         break;
      default:
         unreachable("not reached");
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0)                     \
{                                                           \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);         \
}

#define ALU2(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0,                     \
                   struct brw_reg src1)                     \
{                                                           \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);   \
}

#define ALU3(OP)                                                  \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

#define ALU3F(OP)                                                 \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                     \
          dest.type == BRW_REGISTER_TYPE_DF);                     \
   if (dest.type == BRW_REGISTER_TYPE_F) {                        \
      assert(src0.type == BRW_REGISTER_TYPE_F);                   \
      assert(src1.type == BRW_REGISTER_TYPE_F);                   \
      assert(src2.type == BRW_REGISTER_TYPE_F);                   \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {                \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                  \
   }                                                              \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                    \
void brw_##OP(struct brw_codegen *p,                                 \
              struct brw_reg dest,                                   \
              struct brw_reg src)                                    \
{                                                                    \
   const struct gen_device_info *devinfo = p->devinfo;               \
   brw_inst *rnd, *add;                                              \
   rnd = next_insn(p, BRW_OPCODE_##OP);                              \
   brw_set_dest(p, rnd, dest);                                       \
   brw_set_src0(p, rnd, src);                                        \
                                                                     \
   if (devinfo->gen < 6) {                                           \
      /* turn on round-increments */                                 \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R);   \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));                 \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
   }                                                                 \
}
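/* An illustrative sketch of the pre-gen6 sequence the ROUND macro emits for
 * brw_RNDZ(p, dst, src) (informal assembly, not from the original file):
 *
 *    rndz.r dst, src             // sets the per-channel "increment" flag
 *    (+f0)  add dst, dst, 1.0F   // predicated fixup of mis-rounded channels
 *
 * On gen6+ only the first instruction is emitted.
 */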


ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3F(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
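/* An illustrative before/after of the IVB/BYT workaround above: a <4,4,1>
 * source region reads channels 0 1 2 3, of which the F->DF conversion would
 * only honor the even ones.  The rewritten <1,2,0> region reads 0 0 1 1
 * instead, so every element is presented in an even channel.
 */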

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}
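/* A sketch of the zero-fill sequence above for a UD destination (informal,
 * not from the original file): the UD register is viewed as words with a
 * stride of 2, the conversion writes the low word of each pair, and a second
 * MOV clears the high word, with the dependency-control bits letting the two
 * writes overlap:
 *
 *    f32to16  dst<2>:w, src:f    { NoDDClr }
 *    mov      dst.1<2>:w, 0:w    { NoDDChk }
 */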

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   Because this instruction does not have a 16-bit floating-point
       *   type, the source data type must be Word (W). The destination type
       *   must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}





/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_2);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, eg. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}
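/* A hypothetical usage sketch of the if/else machinery: a generator emits
 * the control-flow markers in order, and the jump targets are patched once
 * brw_ENDIF() pops the stack:
 *
 *    brw_IF(p, BRW_EXECUTE_8);
 *    // ... "then" instructions, predicated via the flag stack ...
 *    brw_ELSE(p);
 *    // ... "else" instructions ...
 *    brw_ENDIF(p);   // pops the stack and patches the IF/ELSE jump offsets
 */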

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn,
                          brw_inst_exec_size(devinfo, p->current));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}
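/* An informal sketch of the SPF rewrite above (assembly notation is
 * illustrative): with 16-byte instructions, an IF at instruction i and an
 * ELSE at instruction j become an inverted-predicate IP add and an
 * unconditional IP add:
 *
 *    (-f0)  add ip, ip, (j - i + 1) * 16   // was IF: skip past the ELSE
 *           add ip, ip, (next - j) * 16    // was ELSE: skip to "ENDIF"
 */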

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP.  On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /* A single call to next_insn() may change the base address of the
    * instruction store (p->store), so call it first, before converting any
    * stored index back into an instruction pointer.
    */
1440 if (emit_endif)
1441 insn = next_insn(p, BRW_OPCODE_ENDIF);
1442
1443 /* Pop the IF and (optional) ELSE instructions from the stack */
1444 p->if_depth_in_loop[p->loop_stack_depth]--;
1445 tmp = pop_if_stack(p);
1446 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1447 else_inst = tmp;
1448 tmp = pop_if_stack(p);
1449 }
1450 if_inst = tmp;
1451
1452 if (!emit_endif) {
1453 /* ENDIF is useless; don't bother emitting it. */
1454 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1455 return;
1456 }
1457
1458 if (devinfo->gen < 6) {
1459 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1460 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1461 brw_set_src1(p, insn, brw_imm_d(0x0));
1462 } else if (devinfo->gen == 6) {
1463 brw_set_dest(p, insn, brw_imm_w(0));
1464 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1465 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1466 } else if (devinfo->gen == 7) {
1467 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1468 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1469 brw_set_src1(p, insn, brw_imm_w(0));
1470 } else {
1471 brw_set_src0(p, insn, brw_imm_d(0));
1472 }
1473
1474 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1475 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1476 if (devinfo->gen < 6)
1477 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1478
1479 /* Also pop item off the stack in the endif instruction: */
1480 if (devinfo->gen < 6) {
1481 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1482 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1483 } else if (devinfo->gen == 6) {
1484 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1485 } else {
1486 brw_inst_set_jip(devinfo, insn, 2);
1487 }
1488 patch_IF_ELSE(p, if_inst, else_inst, insn);
1489 }
1490
1491 brw_inst *
1492 brw_BREAK(struct brw_codegen *p)
1493 {
1494 const struct gen_device_info *devinfo = p->devinfo;
1495 brw_inst *insn;
1496
1497 insn = next_insn(p, BRW_OPCODE_BREAK);
1498 if (devinfo->gen >= 8) {
1499 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1500 brw_set_src0(p, insn, brw_imm_d(0x0));
1501 } else if (devinfo->gen >= 6) {
1502 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1503 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1504 brw_set_src1(p, insn, brw_imm_d(0x0));
1505 } else {
1506 brw_set_dest(p, insn, brw_ip_reg());
1507 brw_set_src0(p, insn, brw_ip_reg());
1508 brw_set_src1(p, insn, brw_imm_d(0x0));
1509 brw_inst_set_gen4_pop_count(devinfo, insn,
1510 p->if_depth_in_loop[p->loop_stack_depth]);
1511 }
1512 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1513 brw_inst_set_exec_size(devinfo, insn,
1514 brw_inst_exec_size(devinfo, p->current));
1515
1516 return insn;
1517 }
1518
1519 brw_inst *
1520 brw_CONT(struct brw_codegen *p)
1521 {
1522 const struct gen_device_info *devinfo = p->devinfo;
1523 brw_inst *insn;
1524
1525 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1526 brw_set_dest(p, insn, brw_ip_reg());
1527 if (devinfo->gen >= 8) {
1528 brw_set_src0(p, insn, brw_imm_d(0x0));
1529 } else {
1530 brw_set_src0(p, insn, brw_ip_reg());
1531 brw_set_src1(p, insn, brw_imm_d(0x0));
1532 }
1533
1534 if (devinfo->gen < 6) {
1535 brw_inst_set_gen4_pop_count(devinfo, insn,
1536 p->if_depth_in_loop[p->loop_stack_depth]);
1537 }
1538 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1539 brw_inst_set_exec_size(devinfo, insn,
1540 brw_inst_exec_size(devinfo, p->current));
1541 return insn;
1542 }
1543
1544 brw_inst *
1545 gen6_HALT(struct brw_codegen *p)
1546 {
1547 const struct gen_device_info *devinfo = p->devinfo;
1548 brw_inst *insn;
1549
1550 insn = next_insn(p, BRW_OPCODE_HALT);
1551 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1552 if (devinfo->gen >= 8) {
1553 brw_set_src0(p, insn, brw_imm_d(0x0));
1554 } else {
1555 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1556 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1557 }
1558
1559 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1560 brw_inst_set_exec_size(devinfo, insn,
1561 brw_inst_exec_size(devinfo, p->current));
1562 return insn;
1563 }
1564
1565 /* DO/WHILE loop:
1566 *
1567 * The DO/WHILE is just an unterminated loop -- break or continue are
1568 * used for control within the loop. We have a few ways they can be
1569 * done.
1570 *
1571 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1572 * jip and no DO instruction.
1573 *
1574 * For non-uniform control flow pre-gen6, there's a DO instruction to
1575 * push the mask, and a WHILE to jump back, and BREAK to get out and
1576 * pop the mask.
1577 *
1578 * For gen6, there's no more mask stack, so no need for DO. WHILE
1579 * just points back to the first instruction of the loop.
1580 */
1581 brw_inst *
1582 brw_DO(struct brw_codegen *p, unsigned execute_size)
1583 {
1584 const struct gen_device_info *devinfo = p->devinfo;
1585
1586 if (devinfo->gen >= 6 || p->single_program_flow) {
1587 push_loop_stack(p, &p->store[p->nr_insn]);
1588 return &p->store[p->nr_insn];
1589 } else {
1590 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1591
1592 push_loop_stack(p, insn);
1593
1594 /* Override the defaults for this instruction:
1595 */
1596 brw_set_dest(p, insn, brw_null_reg());
1597 brw_set_src0(p, insn, brw_null_reg());
1598 brw_set_src1(p, insn, brw_null_reg());
1599
1600 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1601 brw_inst_set_exec_size(devinfo, insn, execute_size);
1602 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1603
1604 return insn;
1605 }
1606 }
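
/* Illustrative only -- a loop is typically emitted as something like:
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *    ... loop body, possibly containing brw_BREAK()/brw_CONT() ...
 *    brw_WHILE(p);
 *
 * with brw_set_uip_jip() run over the whole program afterwards on gen6+.
 */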
1607
1608 /**
1609 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1610 * instruction here.
1611 *
1612 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1613 * nesting, since it can always just point to the end of the block/current loop.
1614 */
1615 static void
1616 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1617 {
1618 const struct gen_device_info *devinfo = p->devinfo;
1619 brw_inst *do_inst = get_inner_do_insn(p);
1620 brw_inst *inst;
1621 unsigned br = brw_jump_scale(devinfo);
1622
1623 assert(devinfo->gen < 6);
1624
1625 for (inst = while_inst - 1; inst != do_inst; inst--) {
1626       /* If the jump count is != 0, this instruction has already been
1627        * patched because it's part of a loop inside the one we're
1628        * patching.
1629 */
1630 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1631 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1632 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1633 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1634 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1635 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1636 }
1637 }
1638 }
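
/* For example, assuming brw_jump_scale() returns 2 (as on gen5), a BREAK
 * located three instructions before the WHILE gets a jump count of
 * 2 * (3 + 1) = 8, landing one instruction past the WHILE, while a CONTINUE
 * in the same spot gets 2 * 3 = 6 and lands on the WHILE itself.
 */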
1639
1640 brw_inst *
1641 brw_WHILE(struct brw_codegen *p)
1642 {
1643 const struct gen_device_info *devinfo = p->devinfo;
1644 brw_inst *insn, *do_insn;
1645 unsigned br = brw_jump_scale(devinfo);
1646
1647 if (devinfo->gen >= 6) {
1648 insn = next_insn(p, BRW_OPCODE_WHILE);
1649 do_insn = get_inner_do_insn(p);
1650
1651 if (devinfo->gen >= 8) {
1652 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1653 brw_set_src0(p, insn, brw_imm_d(0));
1654 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1655 } else if (devinfo->gen == 7) {
1656 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1657 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1658 brw_set_src1(p, insn, brw_imm_w(0));
1659 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1660 } else {
1661 brw_set_dest(p, insn, brw_imm_w(0));
1662 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1663 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1664 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1665 }
1666
1667 brw_inst_set_exec_size(devinfo, insn,
1668 brw_inst_exec_size(devinfo, p->current));
1669
1670 } else {
1671 if (p->single_program_flow) {
1672 insn = next_insn(p, BRW_OPCODE_ADD);
1673 do_insn = get_inner_do_insn(p);
1674
1675 brw_set_dest(p, insn, brw_ip_reg());
1676 brw_set_src0(p, insn, brw_ip_reg());
1677 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1678 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1679 } else {
1680 insn = next_insn(p, BRW_OPCODE_WHILE);
1681 do_insn = get_inner_do_insn(p);
1682
1683 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1684
1685 brw_set_dest(p, insn, brw_ip_reg());
1686 brw_set_src0(p, insn, brw_ip_reg());
1687 brw_set_src1(p, insn, brw_imm_d(0));
1688
1689 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1690 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1691 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1692
1693 brw_patch_break_cont(p, insn);
1694 }
1695 }
1696 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1697
1698 p->loop_stack_depth--;
1699
1700 return insn;
1701 }
1702
1703 /* FORWARD JUMPS:
1704 */
1705 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1706 {
1707 const struct gen_device_info *devinfo = p->devinfo;
1708 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1709 unsigned jmpi = 1;
1710
1711 if (devinfo->gen >= 5)
1712 jmpi = 2;
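   /* JMPI distances are counted in whole 128-bit instructions on gen4 and,
    * presumably to match the jump scaling elsewhere, in 64-bit chunks on
    * gen5+, hence the doubling.
    */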
1713
1714 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1715 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1716
1717 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1718 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1719 }
1720
1721 /* To integrate with the above, it makes sense that the comparison
1722 * instruction should populate the flag register. It might be simpler
1723 * just to use the flag reg for most WM tasks?
1724 */
1725 void brw_CMP(struct brw_codegen *p,
1726 struct brw_reg dest,
1727 unsigned conditional,
1728 struct brw_reg src0,
1729 struct brw_reg src1)
1730 {
1731 const struct gen_device_info *devinfo = p->devinfo;
1732 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1733
1734 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1735 brw_set_dest(p, insn, dest);
1736 brw_set_src0(p, insn, src0);
1737 brw_set_src1(p, insn, src1);
1738
1739 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1740 * page says:
1741 * "Any CMP instruction with a null destination must use a {switch}."
1742 *
1743 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1744 * mentioned on their work-arounds pages.
1745 */
1746 if (devinfo->gen == 7) {
1747 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1748 dest.nr == BRW_ARF_NULL) {
1749 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1750 }
1751 }
1752 }
1753
1754 /***********************************************************************
1755 * Helpers for the various SEND message types:
1756 */
1757
1758 /** Extended math function, float[8].
1759 */
1760 void gen4_math(struct brw_codegen *p,
1761 struct brw_reg dest,
1762 unsigned function,
1763 unsigned msg_reg_nr,
1764 struct brw_reg src,
1765 unsigned precision )
1766 {
1767 const struct gen_device_info *devinfo = p->devinfo;
1768 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1769 unsigned data_type;
1770 if (has_scalar_region(src)) {
1771 data_type = BRW_MATH_DATA_SCALAR;
1772 } else {
1773 data_type = BRW_MATH_DATA_VECTOR;
1774 }
1775
1776 assert(devinfo->gen < 6);
1777
1778 /* Example code doesn't set predicate_control for send
1779 * instructions.
1780 */
1781 brw_inst_set_pred_control(devinfo, insn, 0);
1782 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1783
1784 brw_set_dest(p, insn, dest);
1785 brw_set_src0(p, insn, src);
1786 brw_set_math_message(p,
1787 insn,
1788 function,
1789 src.type == BRW_REGISTER_TYPE_D,
1790 precision,
1791 data_type);
1792 }
1793
1794 void gen6_math(struct brw_codegen *p,
1795 struct brw_reg dest,
1796 unsigned function,
1797 struct brw_reg src0,
1798 struct brw_reg src1)
1799 {
1800 const struct gen_device_info *devinfo = p->devinfo;
1801 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1802
1803 assert(devinfo->gen >= 6);
1804
1805 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1806 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1807
1808 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1809 if (devinfo->gen == 6) {
1810 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1811 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1812 }
1813
1814 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1815 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1816 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1817 assert(src0.type != BRW_REGISTER_TYPE_F);
1818 assert(src1.type != BRW_REGISTER_TYPE_F);
1819 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1820 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1821 } else {
1822 assert(src0.type == BRW_REGISTER_TYPE_F);
1823 assert(src1.type == BRW_REGISTER_TYPE_F);
1824 }
1825
1826 /* Source modifiers are ignored for extended math instructions on Gen6. */
1827 if (devinfo->gen == 6) {
1828 assert(!src0.negate);
1829 assert(!src0.abs);
1830 assert(!src1.negate);
1831 assert(!src1.abs);
1832 }
1833
1834 brw_inst_set_math_function(devinfo, insn, function);
1835
1836 brw_set_dest(p, insn, dest);
1837 brw_set_src0(p, insn, src0);
1838 brw_set_src1(p, insn, src1);
1839 }
1840
1841 /**
1842 * Return the right surface index to access the thread scratch space using
1843 * stateless dataport messages.
1844 */
1845 unsigned
1846 brw_scratch_surface_idx(const struct brw_codegen *p)
1847 {
1848 /* The scratch space is thread-local so IA coherency is unnecessary. */
1849 if (p->devinfo->gen >= 8)
1850 return GEN8_BTI_STATELESS_NON_COHERENT;
1851 else
1852 return BRW_BTI_STATELESS;
1853 }
1854
1855 /**
1856  * Write a block of OWORDs (half a GRF each) into the scratch buffer,
1857 * using a constant offset per channel.
1858 *
1859 * The offset must be aligned to oword size (16 bytes). Used for
1860 * register spilling.
1861 */
1862 void brw_oword_block_write_scratch(struct brw_codegen *p,
1863 struct brw_reg mrf,
1864 int num_regs,
1865 unsigned offset)
1866 {
1867 const struct gen_device_info *devinfo = p->devinfo;
1868 const unsigned target_cache =
1869 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1870 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1871 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
1872 uint32_t msg_type;
1873
1874 if (devinfo->gen >= 6)
1875 offset /= 16;
1876
1877 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1878
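   /* One GRF of message header plus num_regs GRFs of data to be written. */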
1879 const unsigned mlen = 1 + num_regs;
1880
1881 /* Set up the message header. This is g0, with g0.2 filled with
1882 * the offset. We don't want to leave our offset around in g0 or
1883 * it'll screw up texture samples, so set it up inside the message
1884 * reg.
1885 */
1886 {
1887 brw_push_insn_state(p);
1888 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1889 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1890 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1891
1892 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1893
1894 /* set message header global offset field (reg 0, element 2) */
1895 brw_MOV(p,
1896 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
1897 mrf.nr,
1898 2), BRW_REGISTER_TYPE_UD),
1899 brw_imm_ud(offset));
1900
1901 brw_pop_insn_state(p);
1902 }
1903
1904 {
1905 struct brw_reg dest;
1906 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1907 int send_commit_msg;
1908 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
1909 BRW_REGISTER_TYPE_UW);
1910
1911 brw_inst_set_compression(devinfo, insn, false);
1912
1913 if (brw_inst_exec_size(devinfo, insn) >= 16)
1914 src_header = vec16(src_header);
1915
1916 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1917 if (devinfo->gen < 6)
1918 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
1919
1920 /* Until gen6, writes followed by reads from the same location
1921 * are not guaranteed to be ordered unless write_commit is set.
1922 * If set, then a no-op write is issued to the destination
1923 * register to set a dependency, and a read from the destination
1924 * can be used to ensure the ordering.
1925 *
1926 * For gen6, only writes between different threads need ordering
1927 * protection. Our use of DP writes is all about register
1928 * spilling within a thread.
1929 */
1930 if (devinfo->gen >= 6) {
1931 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
1932 send_commit_msg = 0;
1933 } else {
1934 dest = src_header;
1935 send_commit_msg = 1;
1936 }
1937
1938 brw_set_dest(p, insn, dest);
1939 if (devinfo->gen >= 6) {
1940 brw_set_src0(p, insn, mrf);
1941 } else {
1942 brw_set_src0(p, insn, brw_null_reg());
1943 }
1944
1945 if (devinfo->gen >= 6)
1946 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
1947 else
1948 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
1949
1950 brw_set_dp_write_message(p,
1951 insn,
1952 brw_scratch_surface_idx(p),
1953 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
1954 msg_type,
1955 target_cache,
1956 mlen,
1957 true, /* header_present */
1958 0, /* not a render target */
1959 send_commit_msg, /* response_length */
1960 0, /* eot */
1961 send_commit_msg);
1962 }
1963 }
1964
1965
1966 /**
1967 * Read a block of owords (half a GRF each) from the scratch buffer
1968 * using a constant index per channel.
1969 *
1970 * Offset must be aligned to oword size (16 bytes). Used for register
1971 * spilling.
1972 */
1973 void
1974 brw_oword_block_read_scratch(struct brw_codegen *p,
1975 struct brw_reg dest,
1976 struct brw_reg mrf,
1977 int num_regs,
1978 unsigned offset)
1979 {
1980 const struct gen_device_info *devinfo = p->devinfo;
1981
1982 if (devinfo->gen >= 6)
1983 offset /= 16;
1984
1985 if (p->devinfo->gen >= 7) {
1986 /* On gen 7 and above, we no longer have message registers and we can
1987 * send from any register we want. By using the destination register
1988 * for the message, we guarantee that the implied message write won't
1989 * accidentally overwrite anything. This has been a problem because
1990 * the MRF registers and source for the final FB write are both fixed
1991 * and may overlap.
1992 */
1993 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
1994 } else {
1995 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1996 }
1997 dest = retype(dest, BRW_REGISTER_TYPE_UW);
1998
1999 const unsigned rlen = num_regs;
2000 const unsigned target_cache =
2001 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2002 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2003 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2004
2005 {
2006 brw_push_insn_state(p);
2007 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2008 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2009 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2010
2011 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2012
2013 /* set message header global offset field (reg 0, element 2) */
2014 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2015
2016 brw_pop_insn_state(p);
2017 }
2018
2019 {
2020 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2021
2022 assert(brw_inst_pred_control(devinfo, insn) == 0);
2023 brw_inst_set_compression(devinfo, insn, false);
2024
2025 brw_set_dest(p, insn, dest); /* UW? */
2026 if (devinfo->gen >= 6) {
2027 brw_set_src0(p, insn, mrf);
2028 } else {
2029 brw_set_src0(p, insn, brw_null_reg());
2030 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2031 }
2032
2033 brw_set_dp_read_message(p,
2034 insn,
2035 brw_scratch_surface_idx(p),
2036 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2037 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
2038 target_cache,
2039 1, /* msg_length */
2040 true, /* header_present */
2041 rlen);
2042 }
2043 }
2044
2045 void
2046 gen7_block_read_scratch(struct brw_codegen *p,
2047 struct brw_reg dest,
2048 int num_regs,
2049 unsigned offset)
2050 {
2051 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2052 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2053
2054 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2055
2056 /* The HW requires that the header is present; this is to get the g0.5
2057 * scratch offset.
2058 */
2059 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2060
2061 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2062 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2063 * is 32 bytes, which happens to be the size of a register.
2064 */
2065 offset /= REG_SIZE;
2066 assert(offset < (1 << 12));
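   /* e.g. a byte offset of 0x1000 becomes HWord offset 0x1000 / 32 = 128,
    * well within the 12-bit field.
    */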
2067
2068 gen7_set_dp_scratch_message(p, insn,
2069 false, /* scratch read */
2070 false, /* OWords */
2071 false, /* invalidate after read */
2072 num_regs,
2073 offset,
2074 1, /* mlen: just g0 */
2075 num_regs, /* rlen */
2076 true); /* header present */
2077 }
2078
2079 /**
2080 * Read float[4] vectors from the data port constant cache.
2081 * Location (in buffer) should be a multiple of 16.
2082 * Used for fetching shader constants.
2083 */
2084 void brw_oword_block_read(struct brw_codegen *p,
2085 struct brw_reg dest,
2086 struct brw_reg mrf,
2087 uint32_t offset,
2088 uint32_t bind_table_index)
2089 {
2090 const struct gen_device_info *devinfo = p->devinfo;
2091 const unsigned target_cache =
2092 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2093 BRW_DATAPORT_READ_TARGET_DATA_CACHE);
2094 const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
2095
2096 /* On newer hardware, offset is in units of owords. */
2097 if (devinfo->gen >= 6)
2098 offset /= 16;
2099
2100 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2101
2102 brw_push_insn_state(p);
2103 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2104 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2105 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2106
2107 brw_push_insn_state(p);
2108 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2109 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2110
2111 /* set message header global offset field (reg 0, element 2) */
2112 brw_MOV(p,
2113 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2114 mrf.nr,
2115 2), BRW_REGISTER_TYPE_UD),
2116 brw_imm_ud(offset));
2117 brw_pop_insn_state(p);
2118
2119 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2120
2121 /* cast dest to a uword[8] vector */
2122 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2123
2124 brw_set_dest(p, insn, dest);
2125 if (devinfo->gen >= 6) {
2126 brw_set_src0(p, insn, mrf);
2127 } else {
2128 brw_set_src0(p, insn, brw_null_reg());
2129 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2130 }
2131
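   /* The response length is one GRF per eight channels read: one register
    * for SIMD8 and two for SIMD16.
    */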
2132 brw_set_dp_read_message(p, insn, bind_table_index,
2133 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2134 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2135 target_cache,
2136 1, /* msg_length */
2137 true, /* header_present */
2138 DIV_ROUND_UP(exec_size, 8)); /* response_length */
2139
2140 brw_pop_insn_state(p);
2141 }
2142
2143
2144 void brw_fb_WRITE(struct brw_codegen *p,
2145 struct brw_reg payload,
2146 struct brw_reg implied_header,
2147 unsigned msg_control,
2148 unsigned binding_table_index,
2149 unsigned msg_length,
2150 unsigned response_length,
2151 bool eot,
2152 bool last_render_target,
2153 bool header_present)
2154 {
2155 const struct gen_device_info *devinfo = p->devinfo;
2156 const unsigned target_cache =
2157 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2158 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2159 brw_inst *insn;
2160 unsigned msg_type;
2161 struct brw_reg dest, src0;
2162
2163 if (brw_inst_exec_size(devinfo, p->current) >= BRW_EXECUTE_16)
2164 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2165 else
2166 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2167
2168 if (devinfo->gen >= 6) {
2169 insn = next_insn(p, BRW_OPCODE_SENDC);
2170 } else {
2171 insn = next_insn(p, BRW_OPCODE_SEND);
2172 }
2173 brw_inst_set_compression(devinfo, insn, false);
2174
2175 if (devinfo->gen >= 6) {
2176 /* headerless version, just submit color payload */
2177 src0 = payload;
2178
2179 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2180 } else {
2181 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2182 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2183 src0 = implied_header;
2184
2185 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2186 }
2187
2188 brw_set_dest(p, insn, dest);
2189 brw_set_src0(p, insn, src0);
2190 brw_set_dp_write_message(p,
2191 insn,
2192 binding_table_index,
2193 msg_control,
2194 msg_type,
2195 target_cache,
2196 msg_length,
2197 header_present,
2198 last_render_target,
2199 response_length,
2200 eot,
2201 0 /* send_commit_msg */);
2202 }
2203
2204 brw_inst *
2205 gen9_fb_READ(struct brw_codegen *p,
2206 struct brw_reg dst,
2207 struct brw_reg payload,
2208 unsigned binding_table_index,
2209 unsigned msg_length,
2210 unsigned response_length,
2211 bool per_sample)
2212 {
2213 const struct gen_device_info *devinfo = p->devinfo;
2214 assert(devinfo->gen >= 9);
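   /* Message subtype 0 selects a SIMD16 render target read, 1 selects
    * SIMD8.
    */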
2215 const unsigned msg_subtype =
2216 brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16 ? 0 : 1;
2217 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2218
2219 brw_set_dest(p, insn, dst);
2220 brw_set_src0(p, insn, payload);
2221 brw_set_dp_read_message(p, insn, binding_table_index,
2222 per_sample << 5 | msg_subtype,
2223 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2224 GEN6_SFID_DATAPORT_RENDER_CACHE,
2225 msg_length, true /* header_present */,
2226 response_length);
2227 brw_inst_set_rt_slot_group(devinfo, insn,
2228 brw_inst_qtr_control(devinfo, p->current) / 2);
2229
2230 return insn;
2231 }
2232
2233 /**
2234 * Texture sample instruction.
2235 * Note: the msg_type plus msg_length values determine exactly what kind
2236 * of sampling operation is performed. See volume 4, page 161 of docs.
2237 */
2238 void brw_SAMPLE(struct brw_codegen *p,
2239 struct brw_reg dest,
2240 unsigned msg_reg_nr,
2241 struct brw_reg src0,
2242 unsigned binding_table_index,
2243 unsigned sampler,
2244 unsigned msg_type,
2245 unsigned response_length,
2246 unsigned msg_length,
2247 unsigned header_present,
2248 unsigned simd_mode,
2249 unsigned return_format)
2250 {
2251 const struct gen_device_info *devinfo = p->devinfo;
2252 brw_inst *insn;
2253
2254 if (msg_reg_nr != -1)
2255 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2256
2257 insn = next_insn(p, BRW_OPCODE_SEND);
2258 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2259
2260 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2261 *
2262 * "Instruction compression is not allowed for this instruction (that
2263 * is, send). The hardware behavior is undefined if this instruction is
2264 * set as compressed. However, compress control can be set to "SecHalf"
2265 * to affect the EMask generation."
2266 *
2267 * No similar wording is found in later PRMs, but there are examples
2268 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2269 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2270 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2271 */
2272 brw_inst_set_compression(devinfo, insn, false);
2273
2274 if (devinfo->gen < 6)
2275 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2276
2277 brw_set_dest(p, insn, dest);
2278 brw_set_src0(p, insn, src0);
2279 brw_set_sampler_message(p, insn,
2280 binding_table_index,
2281 sampler,
2282 msg_type,
2283 response_length,
2284 msg_length,
2285 header_present,
2286 simd_mode,
2287 return_format);
2288 }
2289
2290 /* Adjust the message header's sampler state pointer to
2291 * select the correct group of 16 samplers.
2292 */
2293 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2294 struct brw_reg header,
2295 struct brw_reg sampler_index)
2296 {
2297 /* The "Sampler Index" field can only store values between 0 and 15.
2298 * However, we can add an offset to the "Sampler State Pointer"
2299 * field, effectively selecting a different set of 16 samplers.
2300 *
2301 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2302  * offset, and each sampler state is only 16 bytes, so we can't
2303  * exclusively use the offset; we have to use both.
2304 */
2305
2306 const struct gen_device_info *devinfo = p->devinfo;
2307
2308 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2309 const int sampler_state_size = 16; /* 16 bytes */
2310 uint32_t sampler = sampler_index.ud;
2311
2312 if (sampler >= 16) {
2313 assert(devinfo->is_haswell || devinfo->gen >= 8);
2314 brw_ADD(p,
2315 get_element_ud(header, 3),
2316 get_element_ud(brw_vec8_grf(0, 0), 3),
2317 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2318 }
2319 } else {
2320 /* Non-const sampler array indexing case */
2321 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2322 return;
2323 }
2324
2325 struct brw_reg temp = get_element_ud(header, 3);
2326
2327 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2328 brw_SHL(p, temp, temp, brw_imm_ud(4));
2329 brw_ADD(p,
2330 get_element_ud(header, 3),
2331 get_element_ud(brw_vec8_grf(0, 0), 3),
2332 temp);
2333 }
2334 }
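
/* For example, an immediate sampler index of 20 makes the code above add
 * 16 * (20 / 16) * 16 = 256 bytes to the "Sampler State Pointer", and the
 * "Sampler Index" field (limited to 0..15 as noted above) then selects
 * sampler 20 % 16 = 4 within that group.
 */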
2335
2336 /* All these variables are pretty confusing - we might be better off
2337 * using bitmasks and macros for this, in the old style. Or perhaps
2338 * just having the caller instantiate the fields in dword3 itself.
2339 */
2340 void brw_urb_WRITE(struct brw_codegen *p,
2341 struct brw_reg dest,
2342 unsigned msg_reg_nr,
2343 struct brw_reg src0,
2344 enum brw_urb_write_flags flags,
2345 unsigned msg_length,
2346 unsigned response_length,
2347 unsigned offset,
2348 unsigned swizzle)
2349 {
2350 const struct gen_device_info *devinfo = p->devinfo;
2351 brw_inst *insn;
2352
2353 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2354
2355 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2356 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2357 brw_push_insn_state(p);
2358 brw_set_default_access_mode(p, BRW_ALIGN_1);
2359 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2360 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2361 BRW_REGISTER_TYPE_UD),
2362 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2363 brw_imm_ud(0xff00));
2364 brw_pop_insn_state(p);
2365 }
2366
2367 insn = next_insn(p, BRW_OPCODE_SEND);
2368
2369 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2370
2371 brw_set_dest(p, insn, dest);
2372 brw_set_src0(p, insn, src0);
2373 brw_set_src1(p, insn, brw_imm_d(0));
2374
2375 if (devinfo->gen < 6)
2376 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2377
2378 brw_set_urb_message(p,
2379 insn,
2380 flags,
2381 msg_length,
2382 response_length,
2383 offset,
2384 swizzle);
2385 }
2386
2387 struct brw_inst *
2388 brw_send_indirect_message(struct brw_codegen *p,
2389 unsigned sfid,
2390 struct brw_reg dst,
2391 struct brw_reg payload,
2392 struct brw_reg desc)
2393 {
2394 const struct gen_device_info *devinfo = p->devinfo;
2395 struct brw_inst *send;
2396 int setup;
2397
2398 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2399
2400 assert(desc.type == BRW_REGISTER_TYPE_UD);
2401
2402 /* We hold on to the setup instruction (the SEND in the direct case, the OR
2403 * in the indirect case) by its index in the instruction store. The
2404 * pointer returned by next_insn() may become invalid if emitting the SEND
2405 * in the indirect case reallocs the store.
2406 */
2407
2408 if (desc.file == BRW_IMMEDIATE_VALUE) {
2409 setup = p->nr_insn;
2410 send = next_insn(p, BRW_OPCODE_SEND);
2411 brw_set_src1(p, send, desc);
2412
2413 } else {
2414 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2415
2416 brw_push_insn_state(p);
2417 brw_set_default_access_mode(p, BRW_ALIGN_1);
2418 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2419 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2420
2421 /* Load the indirect descriptor to an address register using OR so the
2422 * caller can specify additional descriptor bits with the usual
2423 * brw_set_*_message() helper functions.
2424 */
2425 setup = p->nr_insn;
2426 brw_OR(p, addr, desc, brw_imm_ud(0));
2427
2428 brw_pop_insn_state(p);
2429
2430 send = next_insn(p, BRW_OPCODE_SEND);
2431 brw_set_src1(p, send, addr);
2432 }
2433
2434 if (dst.width < BRW_EXECUTE_8)
2435 brw_inst_set_exec_size(devinfo, send, dst.width);
2436
2437 brw_set_dest(p, send, dst);
2438 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2439 brw_inst_set_sfid(devinfo, send, sfid);
2440
2441 return &p->store[setup];
2442 }
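
/* Callers use the returned setup instruction to fill in the remaining
 * descriptor bits; e.g. brw_send_indirect_surface_message() and
 * brw_pixel_interpolator_query() set mlen/rlen on it afterwards.
 */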
2443
2444 static struct brw_inst *
2445 brw_send_indirect_surface_message(struct brw_codegen *p,
2446 unsigned sfid,
2447 struct brw_reg dst,
2448 struct brw_reg payload,
2449 struct brw_reg surface,
2450 unsigned message_len,
2451 unsigned response_len,
2452 bool header_present)
2453 {
2454 const struct gen_device_info *devinfo = p->devinfo;
2455 struct brw_inst *insn;
2456
2457 if (surface.file != BRW_IMMEDIATE_VALUE) {
2458 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2459
2460 brw_push_insn_state(p);
2461 brw_set_default_access_mode(p, BRW_ALIGN_1);
2462 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2463 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2464
2465 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2466 * some surface array is accessed out of bounds.
2467 */
2468 insn = brw_AND(p, addr,
2469 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2470 BRW_GET_SWZ(surface.swizzle, 0)),
2471 brw_imm_ud(0xff));
2472
2473 brw_pop_insn_state(p);
2474
2475 surface = addr;
2476 }
2477
2478 insn = brw_send_indirect_message(p, sfid, dst, payload, surface);
2479 brw_inst_set_mlen(devinfo, insn, message_len);
2480 brw_inst_set_rlen(devinfo, insn, response_len);
2481 brw_inst_set_header_present(devinfo, insn, header_present);
2482
2483 return insn;
2484 }
2485
2486 static bool
2487 while_jumps_before_offset(const struct gen_device_info *devinfo,
2488 brw_inst *insn, int while_offset, int start_offset)
2489 {
2490 int scale = 16 / brw_jump_scale(devinfo);
2491 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2492 : brw_inst_jip(devinfo, insn);
2493 assert(jip < 0);
2494 return while_offset + jip * scale <= start_offset;
2495 }
2496
2497
2498 static int
2499 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2500 {
2501 int offset;
2502 void *store = p->store;
2503 const struct gen_device_info *devinfo = p->devinfo;
2504
2505 int depth = 0;
2506
2507 for (offset = next_offset(devinfo, store, start_offset);
2508 offset < p->next_insn_offset;
2509 offset = next_offset(devinfo, store, offset)) {
2510 brw_inst *insn = store + offset;
2511
2512 switch (brw_inst_opcode(devinfo, insn)) {
2513 case BRW_OPCODE_IF:
2514 depth++;
2515 break;
2516 case BRW_OPCODE_ENDIF:
2517 if (depth == 0)
2518 return offset;
2519 depth--;
2520 break;
2521 case BRW_OPCODE_WHILE:
2522 /* If the while doesn't jump before our instruction, it's the end
2523 * of a sibling do...while loop. Ignore it.
2524 */
2525 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2526 continue;
2527 /* fallthrough */
2528 case BRW_OPCODE_ELSE:
2529 case BRW_OPCODE_HALT:
2530 if (depth == 0)
2531 return offset;
2532 }
2533 }
2534
2535 return 0;
2536 }
2537
2538 /* There is no DO instruction on gen6, so to find the end of the loop
2539 * we have to see if the loop is jumping back before our start
2540 * instruction.
2541 */
2542 static int
2543 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2544 {
2545 const struct gen_device_info *devinfo = p->devinfo;
2546 int offset;
2547 void *store = p->store;
2548
2549 assert(devinfo->gen >= 6);
2550
2551 /* Always start after the instruction (such as a WHILE) we're trying to fix
2552 * up.
2553 */
2554 for (offset = next_offset(devinfo, store, start_offset);
2555 offset < p->next_insn_offset;
2556 offset = next_offset(devinfo, store, offset)) {
2557 brw_inst *insn = store + offset;
2558
2559 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2560 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2561 return offset;
2562 }
2563 }
2564 assert(!"not reached");
2565 return start_offset;
2566 }
2567
2568 /* After program generation, go back and update the UIP and JIP of
2569 * BREAK, CONT, and HALT instructions to their correct locations.
2570 */
2571 void
2572 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2573 {
2574 const struct gen_device_info *devinfo = p->devinfo;
2575 int offset;
2576 int br = brw_jump_scale(devinfo);
2577 int scale = 16 / br;
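   /* scale converts byte offsets into jump units: assuming brw_jump_scale()
    * returns 2 on gen6-7 and 16 on gen8+, scale is 8 (64-bit chunks) and 1
    * (bytes) respectively.
    */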
2578 void *store = p->store;
2579
2580 if (devinfo->gen < 6)
2581 return;
2582
2583 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2584 brw_inst *insn = store + offset;
2585 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2586
2587 int block_end_offset = brw_find_next_block_end(p, offset);
2588 switch (brw_inst_opcode(devinfo, insn)) {
2589 case BRW_OPCODE_BREAK:
2590 assert(block_end_offset != 0);
2591 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2592 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2593 brw_inst_set_uip(devinfo, insn,
2594 (brw_find_loop_end(p, offset) - offset +
2595 (devinfo->gen == 6 ? 16 : 0)) / scale);
2596 break;
2597 case BRW_OPCODE_CONTINUE:
2598 assert(block_end_offset != 0);
2599 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2600 brw_inst_set_uip(devinfo, insn,
2601 (brw_find_loop_end(p, offset) - offset) / scale);
2602
2603 assert(brw_inst_uip(devinfo, insn) != 0);
2604 assert(brw_inst_jip(devinfo, insn) != 0);
2605 break;
2606
2607 case BRW_OPCODE_ENDIF: {
2608 int32_t jump = (block_end_offset == 0) ?
2609 1 * br : (block_end_offset - offset) / scale;
2610 if (devinfo->gen >= 7)
2611 brw_inst_set_jip(devinfo, insn, jump);
2612 else
2613 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2614 break;
2615 }
2616
2617 case BRW_OPCODE_HALT:
2618 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2619 *
2620 * "In case of the halt instruction not inside any conditional
2621 * code block, the value of <JIP> and <UIP> should be the
2622 * same. In case of the halt instruction inside conditional code
2623 * block, the <UIP> should be the end of the program, and the
2624 * <JIP> should be end of the most inner conditional code block."
2625 *
2626 * The uip will have already been set by whoever set up the
2627 * instruction.
2628 */
2629 if (block_end_offset == 0) {
2630 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2631 } else {
2632 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2633 }
2634 assert(brw_inst_uip(devinfo, insn) != 0);
2635 assert(brw_inst_jip(devinfo, insn) != 0);
2636 break;
2637 }
2638 }
2639 }
2640
2641 void brw_ff_sync(struct brw_codegen *p,
2642 struct brw_reg dest,
2643 unsigned msg_reg_nr,
2644 struct brw_reg src0,
2645 bool allocate,
2646 unsigned response_length,
2647 bool eot)
2648 {
2649 const struct gen_device_info *devinfo = p->devinfo;
2650 brw_inst *insn;
2651
2652 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2653
2654 insn = next_insn(p, BRW_OPCODE_SEND);
2655 brw_set_dest(p, insn, dest);
2656 brw_set_src0(p, insn, src0);
2657 brw_set_src1(p, insn, brw_imm_d(0));
2658
2659 if (devinfo->gen < 6)
2660 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2661
2662 brw_set_ff_sync_message(p,
2663 insn,
2664 allocate,
2665 response_length,
2666 eot);
2667 }
2668
2669 /**
2670 * Emit the SEND instruction necessary to generate stream output data on Gen6
2671 * (for transform feedback).
2672 *
2673 * If send_commit_msg is true, this is the last piece of stream output data
2674 * from this thread, so send the data as a committed write. According to the
2675 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2676 *
2677 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2678 * writes are complete by sending the final write as a committed write."
2679 */
2680 void
2681 brw_svb_write(struct brw_codegen *p,
2682 struct brw_reg dest,
2683 unsigned msg_reg_nr,
2684 struct brw_reg src0,
2685 unsigned binding_table_index,
2686 bool send_commit_msg)
2687 {
2688 const struct gen_device_info *devinfo = p->devinfo;
2689 const unsigned target_cache =
2690 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2691 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2692 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2693 brw_inst *insn;
2694
2695 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2696
2697 insn = next_insn(p, BRW_OPCODE_SEND);
2698 brw_set_dest(p, insn, dest);
2699 brw_set_src0(p, insn, src0);
2700 brw_set_src1(p, insn, brw_imm_d(0));
2701 brw_set_dp_write_message(p, insn,
2702 binding_table_index,
2703 0, /* msg_control: ignored */
2704 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2705 target_cache,
2706 1, /* msg_length */
2707 true, /* header_present */
2708 0, /* last_render_target: ignored */
2709 send_commit_msg, /* response_length */
2710 0, /* end_of_thread */
2711 send_commit_msg); /* send_commit_msg */
2712 }
2713
2714 static unsigned
2715 brw_surface_payload_size(struct brw_codegen *p,
2716 unsigned num_channels,
2717 bool has_simd4x2,
2718 bool has_simd16)
2719 {
2720 if (has_simd4x2 &&
2721 brw_inst_access_mode(p->devinfo, p->current) == BRW_ALIGN_16)
2722 return 1;
2723 else if (has_simd16 &&
2724 brw_inst_exec_size(p->devinfo, p->current) == BRW_EXECUTE_16)
2725 return 2 * num_channels;
2726 else
2727 return num_channels;
2728 }
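
/* e.g. a SIMD16 message with four channels gets a payload of 2 * 4 = 8
 * GRFs, the same message in SIMD8 gets 4, and an Align16 SIMD4x2 message
 * gets a single GRF.
 */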
2729
2730 static void
2731 brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
2732 brw_inst *insn,
2733 unsigned atomic_op,
2734 bool response_expected)
2735 {
2736 const struct gen_device_info *devinfo = p->devinfo;
2737 unsigned msg_control =
2738 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2739 (response_expected ? 1 << 5 : 0); /* Return data expected */
2740
2741 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2742 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2743 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2744 msg_control |= 1 << 4; /* SIMD8 mode */
2745
2746 brw_inst_set_dp_msg_type(devinfo, insn,
2747 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2748 } else {
2749 brw_inst_set_dp_msg_type(devinfo, insn,
2750 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2751 }
2752 } else {
2753 brw_inst_set_dp_msg_type(devinfo, insn,
2754 GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2755
2756 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2757 msg_control |= 1 << 4; /* SIMD8 mode */
2758 }
2759
2760 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2761 }
2762
2763 void
2764 brw_untyped_atomic(struct brw_codegen *p,
2765 struct brw_reg dst,
2766 struct brw_reg payload,
2767 struct brw_reg surface,
2768 unsigned atomic_op,
2769 unsigned msg_length,
2770 bool response_expected)
2771 {
2772 const struct gen_device_info *devinfo = p->devinfo;
2773 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2774 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2775 GEN7_SFID_DATAPORT_DATA_CACHE);
2776 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
2777 /* Mask out unused components -- This is especially important in Align16
2778 * mode on generations that don't have native support for SIMD4x2 atomics,
2779 * because unused but enabled components will cause the dataport to perform
2780 * additional atomic operations on the addresses that happen to be in the
2781 * uninitialized Y, Z and W coordinates of the payload.
2782 */
2783 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2784 struct brw_inst *insn = brw_send_indirect_surface_message(
2785 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
2786 brw_surface_payload_size(p, response_expected,
2787 devinfo->gen >= 8 || devinfo->is_haswell, true),
2788 align1);
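
   /* Note that response_expected (0 or 1) is passed above as the channel
    * count, so a response payload is only allocated when return data was
    * actually requested.
    */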
2789
2790 brw_set_dp_untyped_atomic_message(
2791 p, insn, atomic_op, response_expected);
2792 }
2793
2794 static void
2795 brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
2796 struct brw_inst *insn,
2797 unsigned num_channels)
2798 {
2799 const struct gen_device_info *devinfo = p->devinfo;
2800 /* Set mask of 32-bit channels to drop. */
2801 unsigned msg_control = 0xf & (0xf << num_channels);
2802
2803 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2804 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
2805 msg_control |= 1 << 4; /* SIMD16 mode */
2806 else
2807 msg_control |= 2 << 4; /* SIMD8 mode */
2808 }
2809
2810 brw_inst_set_dp_msg_type(devinfo, insn,
2811 (devinfo->gen >= 8 || devinfo->is_haswell ?
2812 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2813 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
2814 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2815 }
2816
2817 void
2818 brw_untyped_surface_read(struct brw_codegen *p,
2819 struct brw_reg dst,
2820 struct brw_reg payload,
2821 struct brw_reg surface,
2822 unsigned msg_length,
2823 unsigned num_channels)
2824 {
2825 const struct gen_device_info *devinfo = p->devinfo;
2826 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2827 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2828 GEN7_SFID_DATAPORT_DATA_CACHE);
2829 struct brw_inst *insn = brw_send_indirect_surface_message(
2830 p, sfid, dst, payload, surface, msg_length,
2831 brw_surface_payload_size(p, num_channels, true, true),
2832 false);
2833
2834 brw_set_dp_untyped_surface_read_message(
2835 p, insn, num_channels);
2836 }
2837
2838 static void
2839 brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
2840 struct brw_inst *insn,
2841 unsigned num_channels)
2842 {
2843 const struct gen_device_info *devinfo = p->devinfo;
2844 /* Set mask of 32-bit channels to drop. */
2845 unsigned msg_control = 0xf & (0xf << num_channels);
2846
2847 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2848 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
2849 msg_control |= 1 << 4; /* SIMD16 mode */
2850 else
2851 msg_control |= 2 << 4; /* SIMD8 mode */
2852 } else {
2853 if (devinfo->gen >= 8 || devinfo->is_haswell)
2854 msg_control |= 0 << 4; /* SIMD4x2 mode */
2855 else
2856 msg_control |= 2 << 4; /* SIMD8 mode */
2857 }
2858
2859 brw_inst_set_dp_msg_type(devinfo, insn,
2860 devinfo->gen >= 8 || devinfo->is_haswell ?
2861 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
2862 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
2863 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2864 }
2865
2866 void
2867 brw_untyped_surface_write(struct brw_codegen *p,
2868 struct brw_reg payload,
2869 struct brw_reg surface,
2870 unsigned msg_length,
2871 unsigned num_channels)
2872 {
2873 const struct gen_device_info *devinfo = p->devinfo;
2874 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2875 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2876 GEN7_SFID_DATAPORT_DATA_CACHE);
2877 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
2878 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2879 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
2880 WRITEMASK_X : WRITEMASK_XYZW;
2881 struct brw_inst *insn = brw_send_indirect_surface_message(
2882 p, sfid, brw_writemask(brw_null_reg(), mask),
2883 payload, surface, msg_length, 0, align1);
2884
2885 brw_set_dp_untyped_surface_write_message(
2886 p, insn, num_channels);
2887 }
2888
2889 static void
2890 brw_set_dp_typed_atomic_message(struct brw_codegen *p,
2891 struct brw_inst *insn,
2892 unsigned atomic_op,
2893 bool response_expected)
2894 {
2895 const struct gen_device_info *devinfo = p->devinfo;
2896 unsigned msg_control =
2897 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2898 (response_expected ? 1 << 5 : 0); /* Return data expected */
2899
2900 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2901 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2902 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
2903 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
2904
2905 brw_inst_set_dp_msg_type(devinfo, insn,
2906 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
2907 } else {
2908 brw_inst_set_dp_msg_type(devinfo, insn,
2909 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
2910 }
2911
2912 } else {
2913 brw_inst_set_dp_msg_type(devinfo, insn,
2914 GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);
2915
2916 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
2917 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
2918 }
2919
2920 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2921 }
2922
2923 void
2924 brw_typed_atomic(struct brw_codegen *p,
2925 struct brw_reg dst,
2926 struct brw_reg payload,
2927 struct brw_reg surface,
2928 unsigned atomic_op,
2929 unsigned msg_length,
2930 bool response_expected) {
2931 const struct gen_device_info *devinfo = p->devinfo;
2932 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2933 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2934 GEN6_SFID_DATAPORT_RENDER_CACHE);
2935 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
2936 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2937 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2938 struct brw_inst *insn = brw_send_indirect_surface_message(
2939 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
2940 brw_surface_payload_size(p, response_expected,
2941 devinfo->gen >= 8 || devinfo->is_haswell, false),
2942 true);
2943
2944 brw_set_dp_typed_atomic_message(
2945 p, insn, atomic_op, response_expected);
2946 }
2947
2948 static void
2949 brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
2950 struct brw_inst *insn,
2951 unsigned num_channels)
2952 {
2953 const struct gen_device_info *devinfo = p->devinfo;
2954 /* Set mask of unused channels. */
2955 unsigned msg_control = 0xf & (0xf << num_channels);
2956
2957 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2958 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2959 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
2960 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
2961 else
2962 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
2963 }
2964
2965 brw_inst_set_dp_msg_type(devinfo, insn,
2966 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
2967 } else {
2968 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2969 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
2970 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
2971 }
2972
2973 brw_inst_set_dp_msg_type(devinfo, insn,
2974 GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
2975 }
2976
2977 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2978 }
2979
2980 void
2981 brw_typed_surface_read(struct brw_codegen *p,
2982 struct brw_reg dst,
2983 struct brw_reg payload,
2984 struct brw_reg surface,
2985 unsigned msg_length,
2986 unsigned num_channels)
2987 {
2988 const struct gen_device_info *devinfo = p->devinfo;
2989 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2990 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2991 GEN6_SFID_DATAPORT_RENDER_CACHE);
2992 struct brw_inst *insn = brw_send_indirect_surface_message(
2993 p, sfid, dst, payload, surface, msg_length,
2994 brw_surface_payload_size(p, num_channels,
2995 devinfo->gen >= 8 || devinfo->is_haswell, false),
2996 true);
2997
2998 brw_set_dp_typed_surface_read_message(
2999 p, insn, num_channels);
3000 }
3001
3002 static void
3003 brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
3004 struct brw_inst *insn,
3005 unsigned num_channels)
3006 {
3007 const struct gen_device_info *devinfo = p->devinfo;
3008 /* Set mask of unused channels. */
3009 unsigned msg_control = 0xf & (0xf << num_channels);
3010
3011 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3012 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3013 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3014 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3015 else
3016 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3017 }
3018
3019 brw_inst_set_dp_msg_type(devinfo, insn,
3020 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);
3021
3022 } else {
3023 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3024 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3025 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3026 }
3027
3028 brw_inst_set_dp_msg_type(devinfo, insn,
3029 GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
3030 }
3031
3032 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3033 }
3034
3035 void
3036 brw_typed_surface_write(struct brw_codegen *p,
3037 struct brw_reg payload,
3038 struct brw_reg surface,
3039 unsigned msg_length,
3040 unsigned num_channels)
3041 {
3042 const struct gen_device_info *devinfo = p->devinfo;
3043 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3044 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3045 GEN6_SFID_DATAPORT_RENDER_CACHE);
3046 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3047 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3048 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3049 WRITEMASK_X : WRITEMASK_XYZW);
3050 struct brw_inst *insn = brw_send_indirect_surface_message(
3051 p, sfid, brw_writemask(brw_null_reg(), mask),
3052 payload, surface, msg_length, 0, true);
3053
3054 brw_set_dp_typed_surface_write_message(
3055 p, insn, num_channels);
3056 }
3057
3058 static void
3059 brw_set_memory_fence_message(struct brw_codegen *p,
3060 struct brw_inst *insn,
3061 enum brw_message_target sfid,
3062 bool commit_enable)
3063 {
3064 const struct gen_device_info *devinfo = p->devinfo;
3065
3066 brw_set_message_descriptor(p, insn, sfid,
3067 1 /* message length */,
3068 (commit_enable ? 1 : 0) /* response length */,
3069 true /* header present */,
3070 false);
3071
3072 switch (sfid) {
3073 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3074 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3075 break;
3076 case GEN7_SFID_DATAPORT_DATA_CACHE:
3077 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3078 break;
3079 default:
3080 unreachable("Not reached");
3081 }
3082
3083 if (commit_enable)
3084 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3085 }
3086
3087 void
3088 brw_memory_fence(struct brw_codegen *p,
3089 struct brw_reg dst)
3090 {
3091 const struct gen_device_info *devinfo = p->devinfo;
3092 const bool commit_enable = devinfo->gen == 7 && !devinfo->is_haswell;
3093 struct brw_inst *insn;
3094
3095 brw_push_insn_state(p);
3096 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3097 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3098 dst = vec1(dst);
3099
3100    /* Set dst as destination for dependency tracking; the MEMORY_FENCE
3101 * message doesn't write anything back.
3102 */
3103 insn = next_insn(p, BRW_OPCODE_SEND);
3104 dst = retype(dst, BRW_REGISTER_TYPE_UW);
3105 brw_set_dest(p, insn, dst);
3106 brw_set_src0(p, insn, dst);
3107 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3108 commit_enable);
3109
3110 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3111 /* IVB does typed surface access through the render cache, so we need to
3112 * flush it too. Use a different register so both flushes can be
3113 * pipelined by the hardware.
3114 */
3115 insn = next_insn(p, BRW_OPCODE_SEND);
3116 brw_set_dest(p, insn, offset(dst, 1));
3117 brw_set_src0(p, insn, offset(dst, 1));
3118 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3119 commit_enable);
3120
3121 /* Now write the response of the second message into the response of the
3122        * first to trigger a pipeline stall -- this way future render and data
3123 * cache messages will be properly ordered with respect to past data and
3124 * render cache messages.
3125 */
3126 brw_MOV(p, dst, offset(dst, 1));
3127 }
3128
3129 brw_pop_insn_state(p);
3130 }
3131
3132 void
3133 brw_pixel_interpolator_query(struct brw_codegen *p,
3134 struct brw_reg dest,
3135 struct brw_reg mrf,
3136 bool noperspective,
3137 unsigned mode,
3138 struct brw_reg data,
3139 unsigned msg_length,
3140 unsigned response_length)
3141 {
3142 const struct gen_device_info *devinfo = p->devinfo;
3143 struct brw_inst *insn;
3144 const uint16_t exec_size = brw_inst_exec_size(devinfo, p->current);
3145
3146 /* brw_send_indirect_message will automatically use a direct send message
3147 * if data is actually immediate.
3148 */
3149 insn = brw_send_indirect_message(p,
3150 GEN7_SFID_PIXEL_INTERPOLATOR,
3151 dest,
3152 mrf,
3153 vec1(data));
3154 brw_inst_set_mlen(devinfo, insn, msg_length);
3155 brw_inst_set_rlen(devinfo, insn, response_length);
3156
3157 brw_inst_set_pi_simd_mode(devinfo, insn, exec_size == BRW_EXECUTE_16);
3158 brw_inst_set_pi_slot_group(devinfo, insn, 0); /* zero unless 32/64px dispatch */
3159 brw_inst_set_pi_nopersp(devinfo, insn, noperspective);
3160 brw_inst_set_pi_message_type(devinfo, insn, mode);
3161 }
3162
3163 void
3164 brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
3165 struct brw_reg mask)
3166 {
3167 const struct gen_device_info *devinfo = p->devinfo;
3168 const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
3169 const unsigned qtr_control = brw_inst_qtr_control(devinfo, p->current);
3170 brw_inst *inst;
3171
3172 assert(devinfo->gen >= 7);
3173 assert(mask.type == BRW_REGISTER_TYPE_UD);
3174
3175 brw_push_insn_state(p);
3176
3177 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3178 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3179
3180 if (devinfo->gen >= 8) {
3181 /* Getting the first active channel index is easy on Gen8: Just find
3182 * the first bit set in the execution mask. The register exists on
3183 * HSW already but it reads back as all ones when the current
3184 * instruction has execution masking disabled, so it's kind of
3185 * useless.
3186 */
3187 struct brw_reg exec_mask =
3188 retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);
3189
3190 if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
3191 /* Unfortunately, ce0 does not take into account the thread
3192 * dispatch mask, which may be a problem in cases where it's not
3193 * tightly packed (i.e. it doesn't have the form '2^n - 1' for
3194 * some n). Combine ce0 with the given dispatch (or vector) mask
3195 * to mask off those channels which were never dispatched by the
3196 * hardware.
3197 */
3198 brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
3199 brw_AND(p, vec1(dst), exec_mask, vec1(dst));
3200 exec_mask = vec1(dst);
3201 }
3202
3203 /* Quarter control has the effect of magically shifting the value of
3204 * ce0 so you'll get the first active channel relative to the
3205 * specified quarter control as result.
3206 */
3207 inst = brw_FBL(p, vec1(dst), exec_mask);
3208 } else {
3209 const struct brw_reg flag = brw_flag_reg(1, 0);
3210
3211 brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
3212
3213 /* Run enough instructions returning zero with execution masking and
3214 * a conditional modifier enabled in order to get the full execution
3215        * mask in f1.0.  We could use a single 32-wide move here if it
3216        * weren't for the hardware bug that causes channel enables to
3217 * be applied incorrectly to the second half of 32-wide instructions
3218 * on Gen7.
3219 */
3220 const unsigned lower_size = MIN2(16, exec_size);
3221 for (unsigned i = 0; i < exec_size / lower_size; i++) {
3222 inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
3223 brw_imm_uw(0));
3224 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
3225 brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
3226 brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
3227 brw_inst_set_flag_reg_nr(devinfo, inst, 1);
3228 brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
3229 }
3230
3231 /* Find the first bit set in the exec_size-wide portion of the flag
3232 * register that was updated by the last sequence of MOV
3233 * instructions.
3234 */
3235 const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
3236 brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
3237 }
3238 } else {
3239 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3240
3241 if (devinfo->gen >= 8 &&
3242 mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
3243 /* In SIMD4x2 mode the first active channel index is just the
3244 * negation of the first bit of the mask register. Note that ce0
3245 * doesn't take into account the dispatch mask, so the Gen7 path
3246 * should be used instead unless you have the guarantee that the
3247 * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
3248 * for some n).
3249 */
3250 inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
3251 negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
3252 brw_imm_ud(1));
3253
3254 } else {
3255 /* Overwrite the destination without and with execution masking to
3256 * find out which of the channels is active.
3257 */
3258 brw_push_insn_state(p);
3259 brw_set_default_exec_size(p, BRW_EXECUTE_4);
3260 brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
3261 brw_imm_ud(1));
3262
3263 inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
3264 brw_imm_ud(0));
3265 brw_pop_insn_state(p);
3266 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
3267 }
3268 }
3269
3270 brw_pop_insn_state(p);
3271 }
3272
void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job, but
       * asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         const unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));
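         /* addr now holds the byte offset of the selected component relative
          * to the start of src: the hstride field is log2-encoded, so the
          * shift amount above is log2(type size) + log2(horizontal stride).
          */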

         /* We can only address up to limit bytes using the indirect
          * addressing immediate; account for the difference if the source
          * register is above this limit.
          */
         if (offset >= limit)
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         brw_MOV(p, dst,
                 retype(brw_vec1_indirect(addr.subnr, offset % limit),
                        src.type));
      } else {
         /* In SIMD4x2 mode the index can be either zero or one; replicate it
          * to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we just want to
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const unsigned sfid = (p->devinfo->gen >= 8 || p->devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(p->devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_src1(p, send, brw_imm_ud(0));
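   /* msg_length 2 (presumably one register carrying the buffer offset and
    * one carrying the value to add), response_length 0: the atomic's return
    * value is discarded, which is why the destination above is null.
    */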
   brw_set_message_descriptor(p, send, sfid, 2, 0, false, false);
   brw_inst_set_binding_table_index(p->devinfo, send, surf_index);
   brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);

   brw_pop_insn_state(p);
}


/**
 * Emit the SEND message for a barrier
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());

   brw_set_message_descriptor(p, inst, BRW_SFID_MESSAGE_GATEWAY,
                              1 /* msg_length */,
                              0 /* response_length */,
                              false /* header_present */,
                              false /* end_of_thread */);

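   /* The notify bit asks the gateway to signal this thread's notification
    * register once every thread in the group has reached the barrier; the
    * WAIT emitted by brw_WAIT() below blocks on that notification.
    */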
   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

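   /* The WAIT instruction blocks until the notification register it names is
    * signaled (by the barrier's notify message above); the same register
    * serves as both source and destination.
    */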
   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}