i965: Switch to using the logical register types
[mesa.git] / src / intel / compiler / brw_eu_emit.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case. This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
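
/* Illustrative only (hypothetical register numbers): a caller that built its
 * SEND payload in a GRF can resolve the implied move before emitting the
 * message, after which `payload` refers to the chosen MRF:
 *
 *    struct brw_reg payload = brw_vec8_grf(2, 0);
 *    gen6_resolve_implied_move(p, &payload, 1);
 *    ... emit the SEND with src0 = payload (now m1 on Gen6+) ...
 */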

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
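
/* For illustration (assuming GEN7_MRF_HACK_START is 112, matching the
 * R112-R127 range quoted above): a message register m4 passed through this
 * helper on Gen7+ becomes g116, i.e. it lands in the register space the PRM
 * requires for sends with EOT.
 */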

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          * Although Dst.HorzStride is a don't care for Align16, HW needs
          * this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* even ignored in da16, still need to set as '01' */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct. However, when dealing with
    * small registers, we automatically reduce it to match the register size.
    *
    * In platforms that support fp64 we can emit instructions with a width of
    * 4 that need two SIMD8 registers and an exec_size of 8 or 16. In these
    * cases we need to make sure that these instructions have their exec sizes
    * set properly when they are emitted and we can't rely on this code to fix
    * it.
    */
   bool fix_exec_size;
   if (devinfo->gen >= 6)
      fix_exec_size = dest.width < BRW_EXECUTE_4;
   else
      fix_exec_size = dest.width < BRW_EXECUTE_8;

   if (fix_exec_size)
      brw_inst_set_exec_size(devinfo, inst, dest.width);
}

static void
validate_reg(const struct gen_device_info *devinfo,
             brw_inst *inst, struct brw_reg reg)
{
   const int hstride_for_reg[] = {0, 1, 2, 4};
   const int vstride_for_reg[] = {0, 1, 2, 4, 8, 16, 32};
   const int width_for_reg[] = {1, 2, 4, 8, 16};
   const int execsize_for_reg[] = {1, 2, 4, 8, 16, 32};
   int width, hstride, vstride, execsize;

   if (reg.file == BRW_IMMEDIATE_VALUE)
      return;

   if (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
       reg.nr == BRW_ARF_NULL)
      return;

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    * "Swizzling is not allowed when an accumulator is used as an implicit
    * source or an explicit source in an instruction."
    */
   if (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
       reg.nr == BRW_ARF_ACCUMULATOR)
      assert(reg.swizzle == BRW_SWIZZLE_XYZW);

   assert(reg.hstride < ARRAY_SIZE(hstride_for_reg));
   hstride = hstride_for_reg[reg.hstride];

   if (reg.vstride == 0xf) {
      vstride = -1;
   } else {
      assert(reg.vstride >= 0 && reg.vstride < ARRAY_SIZE(vstride_for_reg));
      vstride = vstride_for_reg[reg.vstride];
   }

   assert(reg.width >= 0 && reg.width < ARRAY_SIZE(width_for_reg));
   width = width_for_reg[reg.width];

   assert(brw_inst_exec_size(devinfo, inst) >= 0 &&
          brw_inst_exec_size(devinfo, inst) < ARRAY_SIZE(execsize_for_reg));
   execsize = execsize_for_reg[brw_inst_exec_size(devinfo, inst)];

   /* Restrictions from 3.3.10: Register Region Restrictions. */
   /* 3. */
   assert(execsize >= width);

   /* 4. */
   if (execsize == width && hstride != 0) {
      assert(vstride == -1 || vstride == width * hstride);
   }

   /* 5. */
   if (execsize == width && hstride == 0) {
      /* no restriction on vstride. */
   }

   /* 6. */
   if (width == 1) {
      assert(hstride == 0);
   }

   /* 7. */
   if (execsize == 1 && width == 1) {
      assert(hstride == 0);
      assert(vstride == 0);
   }

   /* 8. */
   if (vstride == 0 && hstride == 0) {
      assert(width == 1);
   }

   /* 10. Check destination issues. */
}
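
/* Worked example (informal, illustrative numbers rather than a PRM table):
 * a canonical SIMD8 float source g2<8;8,1>:F has vstride = 8, width = 8,
 * hstride = 1, so with exec_size = 8 it satisfies rule 3 (execsize >= width)
 * and rule 4 (vstride == width * hstride). A scalar such as g2.0<0;1,0>:F
 * instead passes rules 6-8: width 1 with both strides zero.
 */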

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   validate_reg(devinfo, inst, reg);

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             * are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    * "Accumulator registers may be accessed explicitly as src0
    * operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   validate_reg(devinfo, inst, reg);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert (reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             * are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Set the Message Descriptor and Extended Message Descriptor fields
 * for SEND messages.
 *
 * \note This zeroes out the Function Control bits, so it must be called
 *       \b before filling out any message-specific data. Callers can
 *       choose not to fill in irrelevant bits; they will be zero.
 */
void
brw_set_message_descriptor(struct brw_codegen *p,
                           brw_inst *inst,
                           enum brw_message_target sfid,
                           unsigned msg_length,
                           unsigned response_length,
                           bool header_present,
                           bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_src1(p, inst, brw_imm_d(0));

   /* For indirect sends, `inst` will not be the SEND/SENDC instruction
    * itself; instead, it will be a MOV/OR into the address register.
    *
    * In this case, we avoid setting the extended message descriptor bits,
    * since they go on the later SEND/SENDC instead and if set here would
    * instead clobber the conditionalmod bits.
    */
   unsigned opcode = brw_inst_opcode(devinfo, inst);
   if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC) {
      brw_inst_set_sfid(devinfo, inst, sfid);
   }

   brw_inst_set_mlen(devinfo, inst, msg_length);
   brw_inst_set_rlen(devinfo, inst, response_length);
   brw_inst_set_eot(devinfo, inst, end_of_thread);

   if (devinfo->gen >= 5) {
      brw_inst_set_header_present(devinfo, inst, header_present);
   }
}
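
/* A sketch of the intended call order (the message helpers below all follow
 * it; mlen/rlen stand in for caller-computed lengths): set the descriptor
 * first, then fill in the message-specific function control bits:
 *
 *    brw_inst *send = next_insn(p, BRW_OPCODE_SEND);
 *    brw_set_message_descriptor(p, send, BRW_SFID_SAMPLER, mlen, rlen,
 *                               true, false);
 *    ... now set the sampler/URB/dataport-specific fields ...
 *
 * Reversing the order would lose the Function Control bits, since this
 * function zeroes the descriptor.
 */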

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_message_descriptor(p, inst, BRW_SFID_MATH,
                              msg_length, response_length, false, false);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              1, response_length, true, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              msg_length, response_length, true,
                              flags & BRW_URB_WRITE_EOT);

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

void
brw_set_dp_write_message(struct brw_codegen *p,
                         brw_inst *insn,
                         unsigned binding_table_index,
                         unsigned msg_control,
                         unsigned msg_type,
                         unsigned target_cache,
                         unsigned msg_length,
                         bool header_present,
                         unsigned last_render_target,
                         unsigned response_length,
                         unsigned end_of_thread,
                         unsigned send_commit_msg)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_WRITE);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, end_of_thread);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_write_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_write_msg_control(devinfo, insn, msg_control);
   brw_inst_set_rt_last(devinfo, insn, last_render_target);
   if (devinfo->gen < 7) {
      brw_inst_set_dp_write_commit(devinfo, insn, send_commit_msg);
   }
}

void
brw_set_dp_read_message(struct brw_codegen *p,
                        brw_inst *insn,
                        unsigned binding_table_index,
                        unsigned msg_control,
                        unsigned msg_type,
                        unsigned target_cache,
                        unsigned msg_length,
                        bool header_present,
                        unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_READ);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, false);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_read_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_read_msg_control(devinfo, insn, msg_control);
   if (devinfo->gen < 6)
      brw_inst_set_dp_read_target_cache(devinfo, insn, target_cache);
}

void
brw_set_sampler_message(struct brw_codegen *p,
                        brw_inst *inst,
                        unsigned binding_table_index,
                        unsigned sampler,
                        unsigned msg_type,
                        unsigned response_length,
                        unsigned msg_length,
                        unsigned header_present,
                        unsigned simd_mode,
                        unsigned return_format)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER, msg_length,
                              response_length, header_present, false);

   brw_inst_set_binding_table_index(devinfo, inst, binding_table_index);
   brw_inst_set_sampler(devinfo, inst, sampler);
   brw_inst_set_sampler_msg_type(devinfo, inst, msg_type);
   if (devinfo->gen >= 5) {
      brw_inst_set_sampler_simd_mode(devinfo, inst, simd_mode);
   } else if (devinfo->gen == 4 && !devinfo->is_g4x) {
      brw_inst_set_sampler_return_format(devinfo, inst, return_format);
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_message_descriptor(p, inst, GEN7_SFID_DATAPORT_DATA_CACHE,
                              mlen, rlen, header_present, false);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];
   memcpy(insn, p->current, sizeof(*insn));

   brw_inst_set_opcode(devinfo, insn, opcode);
   return insn;
}
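
/* Note the doubling reralloc above: p->store can move, so callers must not
 * hold raw brw_inst pointers across a next_insn() call. The if/loop stacks
 * later in this file store indices (inst - p->store) rather than pointers
 * for exactly this reason.
 */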

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31). However, 3-src instructions
    * use 32-bit units (components 0..7). Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
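
/* For example, a subregister byte offset of 8 maps to 32-bit component 2
 * (8 / 4), so g1.8:F as a 3-src operand is encoded as component 2 of g1.
 * (Register number is illustrative; any F/D/UD operand works the same way.)
 */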

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16);

   assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
          dest.file == BRW_MESSAGE_REGISTER_FILE);
   assert(dest.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(dest.type == BRW_REGISTER_TYPE_F ||
          dest.type == BRW_REGISTER_TYPE_DF ||
          dest.type == BRW_REGISTER_TYPE_D ||
          dest.type == BRW_REGISTER_TYPE_UD);
   if (devinfo->gen == 6) {
      brw_inst_set_3src_dst_reg_file(devinfo, inst,
                                     dest.file == BRW_MESSAGE_REGISTER_FILE);
   }
   brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
   brw_inst_set_3src_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
   brw_inst_set_3src_dst_writemask(devinfo, inst, dest.writemask);

   assert(src0.file == BRW_GENERAL_REGISTER_FILE);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.nr < 128);
   brw_inst_set_3src_src0_swizzle(devinfo, inst, src0.swizzle);
   brw_inst_set_3src_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
   brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
   brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
   brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
   brw_inst_set_3src_src0_rep_ctrl(devinfo, inst,
                                   src0.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src1.file == BRW_GENERAL_REGISTER_FILE);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.nr < 128);
   brw_inst_set_3src_src1_swizzle(devinfo, inst, src1.swizzle);
   brw_inst_set_3src_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
   brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
   brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
   brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
   brw_inst_set_3src_src1_rep_ctrl(devinfo, inst,
                                   src1.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src2.file == BRW_GENERAL_REGISTER_FILE);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.nr < 128);
   brw_inst_set_3src_src2_swizzle(devinfo, inst, src2.swizzle);
   brw_inst_set_3src_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
   brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
   brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
   brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
   brw_inst_set_3src_src2_rep_ctrl(devinfo, inst,
                                   src2.vstride == BRW_VERTICAL_STRIDE_0);

   if (devinfo->gen >= 7) {
      /* Set both the source and destination types based on dest.type,
       * ignoring the source register types. The MAD and LRP emitters ensure
       * that all four types are float. The BFE and BFI2 emitters, however,
       * may send us mixed D and UD types and want us to ignore that and use
       * the destination type.
       */
      switch (dest.type) {
      case BRW_REGISTER_TYPE_F:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_F);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_F);
         break;
      case BRW_REGISTER_TYPE_DF:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_DF);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_DF);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_D);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_D);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_UD);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_UD);
         break;
      default:
         unreachable("not reached");
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                          \
brw_inst *brw_##OP(struct brw_codegen *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0)                   \
{                                                         \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);       \
}

#define ALU2(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0,                     \
                   struct brw_reg src1)                     \
{                                                           \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);   \
}

#define ALU3(OP)                                                  \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

#define ALU3F(OP)                                               \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                   \
          dest.type == BRW_REGISTER_TYPE_DF);                   \
   if (dest.type == BRW_REGISTER_TYPE_F) {                      \
      assert(src0.type == BRW_REGISTER_TYPE_F);                 \
      assert(src1.type == BRW_REGISTER_TYPE_F);                 \
      assert(src2.type == BRW_REGISTER_TYPE_F);                 \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {              \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                \
   }                                                            \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register. A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                  \
void brw_##OP(struct brw_codegen *p,                               \
              struct brw_reg dest,                                 \
              struct brw_reg src)                                  \
{                                                                  \
   const struct gen_device_info *devinfo = p->devinfo;             \
   brw_inst *rnd, *add;                                            \
   rnd = next_insn(p, BRW_OPCODE_##OP);                            \
   brw_set_dest(p, rnd, dest);                                     \
   brw_set_src0(p, rnd, src);                                      \
                                                                   \
   if (devinfo->gen < 6) {                                         \
      /* turn on round-increments */                               \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R); \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));               \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
   }                                                               \
}
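
/* A plausible walk-through of the pre-Gen6 expansion (illustrative values):
 * for RNDZ with a source channel holding -1.5, the first instruction rounds
 * the wrong way to -2.0 and sets that channel's increment bit, and the
 * predicated ADD then produces the round-to-zero result of -1.0. Channels
 * whose increment bit is clear keep the first result unchanged.
 */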

ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3F(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
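
/* As a concrete illustration of the workaround above: a source such as
 * g2<4;4,1>:F is rewritten to g2<1;2,0>:F, so the region reads
 * f0 f0 f1 f1 f2 f2 f3 f3 and the conversion's ignored odd channels no
 * longer drop data. (Register number is illustrative.)
 */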

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV. Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}
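
/* Sketch of what the zero-fill path emits (illustrative register): for a UD
 * destination g4, the conversion writes the low words through g4.0<2>:W and
 * the second MOV writes 0 to the odd words via g4.1<2>:W, so each 32-bit
 * channel ends up holding a zero-extended half-float.
 */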

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   Because this instruction does not have a 16-bit floating-point
       *   type, the source data type must be Word (W). The destination type
       *   must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}


/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_2);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack). Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, eg. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off. If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}
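
/* Sketch of how a generator typically uses these helpers (illustrative; the
 * real callers live in the code generators built on top of brw_eu):
 *
 *    brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ, cond, brw_imm_d(0));
 *    brw_IF(p, BRW_EXECUTE_8);
 *    ...                          // "then" block
 *    brw_ELSE(p);
 *    ...                          // "else" block
 *    brw_ENDIF(p);                // patches the IF/ELSE jump targets
 */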

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier). It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn,
                          brw_inst_exec_size(devinfo, p->current));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block. If there is no ELSE
    * block, point to where ENDIF would be. Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions."). And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs. So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP. On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions."). And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs. So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /*
    * A single next_insn() may change the base address of the instruction
    * store memory (p->store), so call it first, before referencing the
    * instruction store pointer from an index.
    */
   if (emit_endif)
      insn = next_insn(p, BRW_OPCODE_ENDIF);

   /* Pop the IF and (optional) ELSE instructions from the stack */
   p->if_depth_in_loop[p->loop_stack_depth]--;
   tmp = pop_if_stack(p);
   if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
      else_inst = tmp;
      tmp = pop_if_stack(p);
   }
   if_inst = tmp;

   if (!emit_endif) {
      /* ENDIF is useless; don't bother emitting it. */
      convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
      return;
   }

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
   } else {
      brw_set_src0(p, insn, brw_imm_d(0));
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   /* Also pop item off the stack in the endif instruction: */
   if (devinfo->gen < 6) {
      brw_inst_set_gen4_jump_count(devinfo, insn, 0);
      brw_inst_set_gen4_pop_count(devinfo, insn, 1);
   } else if (devinfo->gen == 6) {
      brw_inst_set_gen6_jump_count(devinfo, insn, 2);
   } else {
      brw_inst_set_jip(devinfo, insn, 2);
   }
   patch_IF_ELSE(p, if_inst, else_inst, insn);
}

brw_inst *
brw_BREAK(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_BREAK);
   if (devinfo->gen >= 8) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen >= 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn,
                          brw_inst_exec_size(devinfo, p->current));

   return insn;
}

brw_inst *
brw_CONT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_CONTINUE);
   brw_set_dest(p, insn, brw_ip_reg());
   if (devinfo->gen >= 8) {
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   }

   if (devinfo->gen < 6) {
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn,
                          brw_inst_exec_size(devinfo, p->current));
   return insn;
}
1623
1624 brw_inst *
1625 gen6_HALT(struct brw_codegen *p)
1626 {
1627 const struct gen_device_info *devinfo = p->devinfo;
1628 brw_inst *insn;
1629
1630 insn = next_insn(p, BRW_OPCODE_HALT);
1631 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1632 if (devinfo->gen >= 8) {
1633 brw_set_src0(p, insn, brw_imm_d(0x0));
1634 } else {
1635 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1636 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1637 }
1638
1639 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1640 brw_inst_set_exec_size(devinfo, insn,
1641 brw_inst_exec_size(devinfo, p->current));
1642 return insn;
1643 }
1644
1645 /* DO/WHILE loop:
1646 *
1647 * The DO/WHILE is just an unterminated loop -- break or continue are
1648 * used for control within the loop. There are a few ways it can be
1649 * implemented.
1650 *
1651 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1652 * jip and no DO instruction.
1653 *
1654 * For non-uniform control flow pre-gen6, there's a DO instruction to
1655 * push the mask, a WHILE to jump back, and a BREAK to get out and
1656 * pop the mask.
1657 *
1658 * For gen6 and later, there's no more mask stack, so no need for DO. WHILE
1659 * just points back to the first instruction of the loop.
1660 */
1661 brw_inst *
1662 brw_DO(struct brw_codegen *p, unsigned execute_size)
1663 {
1664 const struct gen_device_info *devinfo = p->devinfo;
1665
1666 if (devinfo->gen >= 6 || p->single_program_flow) {
1667 push_loop_stack(p, &p->store[p->nr_insn]);
1668 return &p->store[p->nr_insn];
1669 } else {
1670 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1671
1672 push_loop_stack(p, insn);
1673
1674 /* Override the defaults for this instruction:
1675 */
1676 brw_set_dest(p, insn, brw_null_reg());
1677 brw_set_src0(p, insn, brw_null_reg());
1678 brw_set_src1(p, insn, brw_null_reg());
1679
1680 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1681 brw_inst_set_exec_size(devinfo, insn, execute_size);
1682 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1683
1684 return insn;
1685 }
1686 }
1687
1688 /**
1689 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1690 * instruction here.
1691 *
1692 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1693 * nesting, since it can always just point to the end of the block/current loop.
1694 */
1695 static void
1696 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1697 {
1698 const struct gen_device_info *devinfo = p->devinfo;
1699 brw_inst *do_inst = get_inner_do_insn(p);
1700 brw_inst *inst;
1701 unsigned br = brw_jump_scale(devinfo);
1702
1703 assert(devinfo->gen < 6);
1704
1705 for (inst = while_inst - 1; inst != do_inst; inst--) {
1706 /* If the jump count is != 0, this instruction has already been
1707 * patched, because it's part of a loop nested inside the one we're
1708 * patching.
1709 */
1710 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1711 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1712 brw_inst_set_gen4_jump_count(devinfo, inst, br * ((while_inst - inst) + 1));
1713 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1714 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1715 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1716 }
1717 }
1718 }
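/* Worked example (a sketch, assuming gen5 where brw_jump_scale() == 2):
 * for a BREAK sitting three instructions before the WHILE,
 * jump_count = 2 * ((while_inst - inst) + 1) = 2 * 4 = 8 64-bit chunks,
 * i.e. four 128-bit instructions -- enough to land past the WHILE and out
 * of the loop. A CONTINUE at the same spot gets 2 * 3 = 6, one
 * instruction less, so it re-enters at the WHILE and re-evaluates the
 * loop condition.
 */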
1719
1720 brw_inst *
1721 brw_WHILE(struct brw_codegen *p)
1722 {
1723 const struct gen_device_info *devinfo = p->devinfo;
1724 brw_inst *insn, *do_insn;
1725 unsigned br = brw_jump_scale(devinfo);
1726
1727 if (devinfo->gen >= 6) {
1728 insn = next_insn(p, BRW_OPCODE_WHILE);
1729 do_insn = get_inner_do_insn(p);
1730
1731 if (devinfo->gen >= 8) {
1732 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1733 brw_set_src0(p, insn, brw_imm_d(0));
1734 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1735 } else if (devinfo->gen == 7) {
1736 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1737 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1738 brw_set_src1(p, insn, brw_imm_w(0));
1739 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1740 } else {
1741 brw_set_dest(p, insn, brw_imm_w(0));
1742 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1743 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1744 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1745 }
1746
1747 brw_inst_set_exec_size(devinfo, insn,
1748 brw_inst_exec_size(devinfo, p->current));
1749
1750 } else {
1751 if (p->single_program_flow) {
1752 insn = next_insn(p, BRW_OPCODE_ADD);
1753 do_insn = get_inner_do_insn(p);
1754
1755 brw_set_dest(p, insn, brw_ip_reg());
1756 brw_set_src0(p, insn, brw_ip_reg());
1757 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1758 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1759 } else {
1760 insn = next_insn(p, BRW_OPCODE_WHILE);
1761 do_insn = get_inner_do_insn(p);
1762
1763 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1764
1765 brw_set_dest(p, insn, brw_ip_reg());
1766 brw_set_src0(p, insn, brw_ip_reg());
1767 brw_set_src1(p, insn, brw_imm_d(0));
1768
1769 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1770 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1771 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1772
1773 brw_patch_break_cont(p, insn);
1774 }
1775 }
1776 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1777
1778 p->loop_stack_depth--;
1779
1780 return insn;
1781 }
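/* Hedged usage sketch: a caller-side loop built from these helpers might
 * look like the following; BREAK/CONT jump fields are patched later, by
 * brw_patch_break_cont() above for pre-gen6 or by brw_set_uip_jip() after
 * code generation for gen6+:
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *       ... loop body ...
 *       brw_BREAK(p);   (predicated by the caller for a conditional exit)
 *    brw_WHILE(p);
 */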
1782
1783 /* FORWARD JUMPS:
1784 */
1785 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1786 {
1787 const struct gen_device_info *devinfo = p->devinfo;
1788 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1789 unsigned jmpi = 1;
1790
1791 if (devinfo->gen >= 5)
1792 jmpi = 2;
1793
1794 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1795 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1796
1797 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1798 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1799 }
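/* Worked example (a sketch, assuming gen5 where jmpi == 2): if the JMPI
 * was emitted at index jmp_insn_idx and three instructions follow it,
 * p->nr_insn - jmp_insn_idx - 1 == 3 and the jump count becomes
 * 2 * 3 = 6, skipping exactly those three instructions.
 */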
1800
1801 /* To integrate with the above, it makes sense that the comparison
1802 * instruction should populate the flag register. It might be simpler
1803 * just to use the flag reg for most WM tasks?
1804 */
1805 void brw_CMP(struct brw_codegen *p,
1806 struct brw_reg dest,
1807 unsigned conditional,
1808 struct brw_reg src0,
1809 struct brw_reg src1)
1810 {
1811 const struct gen_device_info *devinfo = p->devinfo;
1812 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1813
1814 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1815 brw_set_dest(p, insn, dest);
1816 brw_set_src0(p, insn, src0);
1817 brw_set_src1(p, insn, src1);
1818
1819 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1820 * page says:
1821 * "Any CMP instruction with a null destination must use a {switch}."
1822 *
1823 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1824 * mentioned on their workarounds pages.
1825 */
1826 if (devinfo->gen == 7) {
1827 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1828 dest.nr == BRW_ARF_NULL) {
1829 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1830 }
1831 }
1832 }
1833
1834 /***********************************************************************
1835 * Helpers for the various SEND message types:
1836 */
1837
1838 /** Extended math function, float[8].
1839 */
1840 void gen4_math(struct brw_codegen *p,
1841 struct brw_reg dest,
1842 unsigned function,
1843 unsigned msg_reg_nr,
1844 struct brw_reg src,
1845 unsigned precision)
1846 {
1847 const struct gen_device_info *devinfo = p->devinfo;
1848 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1849 unsigned data_type;
1850 if (has_scalar_region(src)) {
1851 data_type = BRW_MATH_DATA_SCALAR;
1852 } else {
1853 data_type = BRW_MATH_DATA_VECTOR;
1854 }
1855
1856 assert(devinfo->gen < 6);
1857
1858 /* Example code doesn't set predicate_control for send
1859 * instructions.
1860 */
1861 brw_inst_set_pred_control(devinfo, insn, 0);
1862 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1863
1864 brw_set_dest(p, insn, dest);
1865 brw_set_src0(p, insn, src);
1866 brw_set_math_message(p,
1867 insn,
1868 function,
1869 src.type == BRW_REGISTER_TYPE_D,
1870 precision,
1871 data_type);
1872 }
1873
1874 void gen6_math(struct brw_codegen *p,
1875 struct brw_reg dest,
1876 unsigned function,
1877 struct brw_reg src0,
1878 struct brw_reg src1)
1879 {
1880 const struct gen_device_info *devinfo = p->devinfo;
1881 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1882
1883 assert(devinfo->gen >= 6);
1884
1885 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1886 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1887
1888 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1889 if (devinfo->gen == 6) {
1890 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1891 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1892 }
1893
1894 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1895 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1896 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1897 assert(src0.type != BRW_REGISTER_TYPE_F);
1898 assert(src1.type != BRW_REGISTER_TYPE_F);
1899 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1900 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1901 } else {
1902 assert(src0.type == BRW_REGISTER_TYPE_F);
1903 assert(src1.type == BRW_REGISTER_TYPE_F);
1904 }
1905
1906 /* Source modifiers are ignored for extended math instructions on Gen6. */
1907 if (devinfo->gen == 6) {
1908 assert(!src0.negate);
1909 assert(!src0.abs);
1910 assert(!src1.negate);
1911 assert(!src1.abs);
1912 }
1913
1914 brw_inst_set_math_function(devinfo, insn, function);
1915
1916 brw_set_dest(p, insn, dest);
1917 brw_set_src0(p, insn, src0);
1918 brw_set_src1(p, insn, src1);
1919 }
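/* Hedged usage sketch: a unary function such as RSQ would typically be
 * emitted with a null (float-typed) src1, which satisfies the asserts
 * above; "dst" and "src" here stand for hypothetical float GRF registers:
 *
 *    gen6_math(p, dst, BRW_MATH_FUNCTION_RSQ, src, brw_null_reg());
 */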
1920
1921 /**
1922 * Return the right surface index to access the thread scratch space using
1923 * stateless dataport messages.
1924 */
1925 unsigned
1926 brw_scratch_surface_idx(const struct brw_codegen *p)
1927 {
1928 /* The scratch space is thread-local so IA coherency is unnecessary. */
1929 if (p->devinfo->gen >= 8)
1930 return GEN8_BTI_STATELESS_NON_COHERENT;
1931 else
1932 return BRW_BTI_STATELESS;
1933 }
1934
1935 /**
1936 * Write a block of OWORDs (half a GRF each) from the scratch buffer,
1937 * using a constant offset per channel.
1938 *
1939 * The offset must be aligned to oword size (16 bytes). Used for
1940 * register spilling.
1941 */
1942 void brw_oword_block_write_scratch(struct brw_codegen *p,
1943 struct brw_reg mrf,
1944 int num_regs,
1945 unsigned offset)
1946 {
1947 const struct gen_device_info *devinfo = p->devinfo;
1948 const unsigned target_cache =
1949 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1950 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1951 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
1952 uint32_t msg_type;
1953
1954 if (devinfo->gen >= 6)
1955 offset /= 16;
1956
1957 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1958
1959 const unsigned mlen = 1 + num_regs;
1960
1961 /* Set up the message header. This is g0, with g0.2 filled with
1962 * the offset. We don't want to leave our offset around in g0 or
1963 * it'll screw up texture samples, so set it up inside the message
1964 * reg.
1965 */
1966 {
1967 brw_push_insn_state(p);
1968 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1969 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1970 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1971
1972 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1973
1974 /* set message header global offset field (reg 0, element 2) */
1975 brw_MOV(p,
1976 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
1977 mrf.nr,
1978 2), BRW_REGISTER_TYPE_UD),
1979 brw_imm_ud(offset));
1980
1981 brw_pop_insn_state(p);
1982 }
1983
1984 {
1985 struct brw_reg dest;
1986 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1987 int send_commit_msg;
1988 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
1989 BRW_REGISTER_TYPE_UW);
1990
1991 brw_inst_set_compression(devinfo, insn, false);
1992
1993 if (brw_inst_exec_size(devinfo, insn) >= 16)
1994 src_header = vec16(src_header);
1995
1996 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1997 if (devinfo->gen < 6)
1998 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
1999
2000 /* Until gen6, writes followed by reads from the same location
2001 * are not guaranteed to be ordered unless write_commit is set.
2002 * If set, then a no-op write is issued to the destination
2003 * register to set a dependency, and a read from the destination
2004 * can be used to ensure the ordering.
2005 *
2006 * For gen6, only writes between different threads need ordering
2007 * protection. Our use of DP writes is all about register
2008 * spilling within a thread.
2009 */
2010 if (devinfo->gen >= 6) {
2011 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2012 send_commit_msg = 0;
2013 } else {
2014 dest = src_header;
2015 send_commit_msg = 1;
2016 }
2017
2018 brw_set_dest(p, insn, dest);
2019 if (devinfo->gen >= 6) {
2020 brw_set_src0(p, insn, mrf);
2021 } else {
2022 brw_set_src0(p, insn, brw_null_reg());
2023 }
2024
2025 if (devinfo->gen >= 6)
2026 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2027 else
2028 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2029
2030 brw_set_dp_write_message(p,
2031 insn,
2032 brw_scratch_surface_idx(p),
2033 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2034 msg_type,
2035 target_cache,
2036 mlen,
2037 true, /* header_present */
2038 0, /* not a render target */
2039 send_commit_msg, /* response_length */
2040 0, /* eot */
2041 send_commit_msg); /* send_commit_msg */
2042 }
2043 }
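/* Worked example (a sketch): spilling two GRFs to byte offset 64 on gen6+,
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(1), 2, 64);
 *
 * scales the offset to 64 / 16 = 4 OWords, encodes a block size of
 * 2 * 8 = 16 DWords (4 OWords), and sends mlen = 1 + 2 = 3 registers:
 * the header plus the two payload registers.
 */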
2044
2045
2046 /**
2047 * Read a block of owords (half a GRF each) from the scratch buffer
2048 * using a constant index per channel.
2049 *
2050 * Offset must be aligned to oword size (16 bytes). Used for register
2051 * spilling.
2052 */
2053 void
2054 brw_oword_block_read_scratch(struct brw_codegen *p,
2055 struct brw_reg dest,
2056 struct brw_reg mrf,
2057 int num_regs,
2058 unsigned offset)
2059 {
2060 const struct gen_device_info *devinfo = p->devinfo;
2061
2062 if (devinfo->gen >= 6)
2063 offset /= 16;
2064
2065 if (p->devinfo->gen >= 7) {
2066 /* On gen 7 and above, we no longer have message registers and we can
2067 * send from any register we want. By using the destination register
2068 * for the message, we guarantee that the implied message write won't
2069 * accidentally overwrite anything. This has been a problem because
2070 * the MRF registers and source for the final FB write are both fixed
2071 * and may overlap.
2072 */
2073 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2074 } else {
2075 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2076 }
2077 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2078
2079 const unsigned rlen = num_regs;
2080 const unsigned target_cache =
2081 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2082 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2083 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2084
2085 {
2086 brw_push_insn_state(p);
2087 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2088 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2089 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2090
2091 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2092
2093 /* set message header global offset field (reg 0, element 2) */
2094 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2095
2096 brw_pop_insn_state(p);
2097 }
2098
2099 {
2100 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2101
2102 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2103 brw_inst_set_compression(devinfo, insn, false);
2104
2105 brw_set_dest(p, insn, dest); /* retyped to UW above */
2106 if (devinfo->gen >= 6) {
2107 brw_set_src0(p, insn, mrf);
2108 } else {
2109 brw_set_src0(p, insn, brw_null_reg());
2110 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2111 }
2112
2113 brw_set_dp_read_message(p,
2114 insn,
2115 brw_scratch_surface_idx(p),
2116 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2117 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
2118 target_cache,
2119 1, /* msg_length */
2120 true, /* header_present */
2121 rlen);
2122 }
2123 }
2124
2125 void
2126 gen7_block_read_scratch(struct brw_codegen *p,
2127 struct brw_reg dest,
2128 int num_regs,
2129 unsigned offset)
2130 {
2131 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2132 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2133
2134 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2135
2136 /* The HW requires that the header is present; this is to get the g0.5
2137 * scratch offset.
2138 */
2139 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2140
2141 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2142 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2143 * is 32 bytes, which happens to be the size of a register.
2144 */
2145 offset /= REG_SIZE;
2146 assert(offset < (1 << 12));
2147
2148 gen7_set_dp_scratch_message(p, insn,
2149 false, /* scratch read */
2150 false, /* OWords */
2151 false, /* invalidate after read */
2152 num_regs,
2153 offset,
2154 1, /* mlen: just g0 */
2155 num_regs, /* rlen */
2156 true); /* header present */
2157 }
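/* Worked example (a sketch): reading three GRFs spilled at byte offset 96
 * gives offset / REG_SIZE = 96 / 32 = 3 HWords, comfortably inside the
 * 12-bit field, which covers 4096 * 32 = 128KB of scratch per thread.
 */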
2158
2159 /**
2160 * Read float[4] vectors from the data port constant cache.
2161 * Location (in buffer) should be a multiple of 16.
2162 * Used for fetching shader constants.
2163 */
2164 void brw_oword_block_read(struct brw_codegen *p,
2165 struct brw_reg dest,
2166 struct brw_reg mrf,
2167 uint32_t offset,
2168 uint32_t bind_table_index)
2169 {
2170 const struct gen_device_info *devinfo = p->devinfo;
2171 const unsigned target_cache =
2172 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2173 BRW_DATAPORT_READ_TARGET_DATA_CACHE);
2174 const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
2175
2176 /* On gen6 and later, the offset is specified in units of OWords (16 bytes). */
2177 if (devinfo->gen >= 6)
2178 offset /= 16;
2179
2180 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2181
2182 brw_push_insn_state(p);
2183 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2184 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2185 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2186
2187 brw_push_insn_state(p);
2188 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2189 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2190
2191 /* set message header global offset field (reg 0, element 2) */
2192 brw_MOV(p,
2193 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2194 mrf.nr,
2195 2), BRW_REGISTER_TYPE_UD),
2196 brw_imm_ud(offset));
2197 brw_pop_insn_state(p);
2198
2199 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2200
2201 /* cast dest to a uword[8] vector */
2202 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2203
2204 brw_set_dest(p, insn, dest);
2205 if (devinfo->gen >= 6) {
2206 brw_set_src0(p, insn, mrf);
2207 } else {
2208 brw_set_src0(p, insn, brw_null_reg());
2209 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2210 }
2211
2212 brw_set_dp_read_message(p, insn, bind_table_index,
2213 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2214 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2215 target_cache,
2216 1, /* msg_length */
2217 true, /* header_present */
2218 DIV_ROUND_UP(exec_size, 8)); /* response_length */
2219
2220 brw_pop_insn_state(p);
2221 }
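/* Hedged usage sketch: fetching shader constants from byte offset 32 of a
 * hypothetical binding-table entry surf_index,
 *
 *    brw_oword_block_read(p, dst, brw_message_reg(1), 32, surf_index);
 *
 * encodes, at exec_size == 8, an 8-DWord (two-OWord) block with a
 * response length of DIV_ROUND_UP(8, 8) = 1 register.
 */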
2222
2223
2224 void brw_fb_WRITE(struct brw_codegen *p,
2225 struct brw_reg payload,
2226 struct brw_reg implied_header,
2227 unsigned msg_control,
2228 unsigned binding_table_index,
2229 unsigned msg_length,
2230 unsigned response_length,
2231 bool eot,
2232 bool last_render_target,
2233 bool header_present)
2234 {
2235 const struct gen_device_info *devinfo = p->devinfo;
2236 const unsigned target_cache =
2237 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2238 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2239 brw_inst *insn;
2240 unsigned msg_type;
2241 struct brw_reg dest, src0;
2242
2243 if (brw_inst_exec_size(devinfo, p->current) >= BRW_EXECUTE_16)
2244 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2245 else
2246 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2247
2248 if (devinfo->gen >= 6) {
2249 insn = next_insn(p, BRW_OPCODE_SENDC);
2250 } else {
2251 insn = next_insn(p, BRW_OPCODE_SEND);
2252 }
2253 brw_inst_set_compression(devinfo, insn, false);
2254
2255 if (devinfo->gen >= 6) {
2256 /* headerless version, just submit color payload */
2257 src0 = payload;
2258
2259 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2260 } else {
2261 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2262 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2263 src0 = implied_header;
2264
2265 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2266 }
2267
2268 brw_set_dest(p, insn, dest);
2269 brw_set_src0(p, insn, src0);
2270 brw_set_dp_write_message(p,
2271 insn,
2272 binding_table_index,
2273 msg_control,
2274 msg_type,
2275 target_cache,
2276 msg_length,
2277 header_present,
2278 last_render_target,
2279 response_length,
2280 eot,
2281 0 /* send_commit_msg */);
2282 }
2283
2284 brw_inst *
2285 gen9_fb_READ(struct brw_codegen *p,
2286 struct brw_reg dst,
2287 struct brw_reg payload,
2288 unsigned binding_table_index,
2289 unsigned msg_length,
2290 unsigned response_length,
2291 bool per_sample)
2292 {
2293 const struct gen_device_info *devinfo = p->devinfo;
2294 assert(devinfo->gen >= 9);
2295 const unsigned msg_subtype =
2296 brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16 ? 0 : 1;
2297 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2298
2299 brw_set_dest(p, insn, dst);
2300 brw_set_src0(p, insn, payload);
2301 brw_set_dp_read_message(p, insn, binding_table_index,
2302 per_sample << 5 | msg_subtype,
2303 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2304 GEN6_SFID_DATAPORT_RENDER_CACHE,
2305 msg_length, true /* header_present */,
2306 response_length);
2307 brw_inst_set_rt_slot_group(devinfo, insn,
2308 brw_inst_qtr_control(devinfo, p->current) / 2);
2309
2310 return insn;
2311 }
2312
2313 /**
2314 * Texture sample instruction.
2315 * Note: the msg_type plus msg_length values determine exactly what kind
2316 * of sampling operation is performed. See volume 4, page 161 of docs.
2317 */
2318 void brw_SAMPLE(struct brw_codegen *p,
2319 struct brw_reg dest,
2320 unsigned msg_reg_nr,
2321 struct brw_reg src0,
2322 unsigned binding_table_index,
2323 unsigned sampler,
2324 unsigned msg_type,
2325 unsigned response_length,
2326 unsigned msg_length,
2327 unsigned header_present,
2328 unsigned simd_mode,
2329 unsigned return_format)
2330 {
2331 const struct gen_device_info *devinfo = p->devinfo;
2332 brw_inst *insn;
2333
2334 if (msg_reg_nr != -1)
2335 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2336
2337 insn = next_insn(p, BRW_OPCODE_SEND);
2338 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2339
2340 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2341 *
2342 * "Instruction compression is not allowed for this instruction (that
2343 * is, send). The hardware behavior is undefined if this instruction is
2344 * set as compressed. However, compress control can be set to "SecHalf"
2345 * to affect the EMask generation."
2346 *
2347 * No similar wording is found in later PRMs, but there are examples
2348 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2349 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2350 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2351 */
2352 brw_inst_set_compression(devinfo, insn, false);
2353
2354 if (devinfo->gen < 6)
2355 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2356
2357 brw_set_dest(p, insn, dest);
2358 brw_set_src0(p, insn, src0);
2359 brw_set_sampler_message(p, insn,
2360 binding_table_index,
2361 sampler,
2362 msg_type,
2363 response_length,
2364 msg_length,
2365 header_present,
2366 simd_mode,
2367 return_format);
2368 }
2369
2370 /* Adjust the message header's sampler state pointer to
2371 * select the correct group of 16 samplers.
2372 */
2373 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2374 struct brw_reg header,
2375 struct brw_reg sampler_index)
2376 {
2377 /* The "Sampler Index" field can only store values between 0 and 15.
2378 * However, we can add an offset to the "Sampler State Pointer"
2379 * field, effectively selecting a different set of 16 samplers.
2380 *
2381 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2382 * offset, and each sampler state is only 16-bytes, so we can't
2383 * exclusively use the offset - we have to use both.
2384 */
2385
2386 const struct gen_device_info *devinfo = p->devinfo;
2387
2388 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2389 const int sampler_state_size = 16; /* 16 bytes */
2390 uint32_t sampler = sampler_index.ud;
2391
2392 if (sampler >= 16) {
2393 assert(devinfo->is_haswell || devinfo->gen >= 8);
2394 brw_ADD(p,
2395 get_element_ud(header, 3),
2396 get_element_ud(brw_vec8_grf(0, 0), 3),
2397 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2398 }
2399 } else {
2400 /* Non-const sampler array indexing case */
2401 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2402 return;
2403 }
2404
2405 struct brw_reg temp = get_element_ud(header, 3);
2406
2407 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2408 brw_SHL(p, temp, temp, brw_imm_ud(4));
2409 brw_ADD(p,
2410 get_element_ud(header, 3),
2411 get_element_ud(brw_vec8_grf(0, 0), 3),
2412 temp);
2413 }
2414 }
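/* Worked example (a sketch): for an immediate sampler index of 20, the
 * code above adds 16 * (20 / 16) * 16 = 256 bytes to the sampler state
 * pointer in the header, skipping one full group of 16 16-byte sampler
 * states; the caller's 4-bit "Sampler Index" message field then selects
 * 20 % 16 = 4 within that group.
 */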
2415
2416 /* All these variables are pretty confusing - we might be better off
2417 * using bitmasks and macros for this, in the old style. Or perhaps
2418 * just having the caller instantiate the fields in dword3 itself.
2419 */
2420 void brw_urb_WRITE(struct brw_codegen *p,
2421 struct brw_reg dest,
2422 unsigned msg_reg_nr,
2423 struct brw_reg src0,
2424 enum brw_urb_write_flags flags,
2425 unsigned msg_length,
2426 unsigned response_length,
2427 unsigned offset,
2428 unsigned swizzle)
2429 {
2430 const struct gen_device_info *devinfo = p->devinfo;
2431 brw_inst *insn;
2432
2433 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2434
2435 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2436 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2437 brw_push_insn_state(p);
2438 brw_set_default_access_mode(p, BRW_ALIGN_1);
2439 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2440 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2441 BRW_REGISTER_TYPE_UD),
2442 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2443 brw_imm_ud(0xff00));
2444 brw_pop_insn_state(p);
2445 }
2446
2447 insn = next_insn(p, BRW_OPCODE_SEND);
2448
2449 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2450
2451 brw_set_dest(p, insn, dest);
2452 brw_set_src0(p, insn, src0);
2453 brw_set_src1(p, insn, brw_imm_d(0));
2454
2455 if (devinfo->gen < 6)
2456 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2457
2458 brw_set_urb_message(p,
2459 insn,
2460 flags,
2461 msg_length,
2462 response_length,
2463 offset,
2464 swizzle);
2465 }
2466
2467 struct brw_inst *
2468 brw_send_indirect_message(struct brw_codegen *p,
2469 unsigned sfid,
2470 struct brw_reg dst,
2471 struct brw_reg payload,
2472 struct brw_reg desc)
2473 {
2474 const struct gen_device_info *devinfo = p->devinfo;
2475 struct brw_inst *send;
2476 int setup;
2477
2478 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2479
2480 assert(desc.type == BRW_REGISTER_TYPE_UD);
2481
2482 /* We hold on to the setup instruction (the SEND in the direct case, the OR
2483 * in the indirect case) by its index in the instruction store. The
2484 * pointer returned by next_insn() may become invalid if emitting the SEND
2485 * in the indirect case reallocs the store.
2486 */
2487
2488 if (desc.file == BRW_IMMEDIATE_VALUE) {
2489 setup = p->nr_insn;
2490 send = next_insn(p, BRW_OPCODE_SEND);
2491 brw_set_src1(p, send, desc);
2492
2493 } else {
2494 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2495
2496 brw_push_insn_state(p);
2497 brw_set_default_access_mode(p, BRW_ALIGN_1);
2498 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2499 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2500
2501 /* Load the indirect descriptor to an address register using OR so the
2502 * caller can specify additional descriptor bits with the usual
2503 * brw_set_*_message() helper functions.
2504 */
2505 setup = p->nr_insn;
2506 brw_OR(p, addr, desc, brw_imm_ud(0));
2507
2508 brw_pop_insn_state(p);
2509
2510 send = next_insn(p, BRW_OPCODE_SEND);
2511 brw_set_src1(p, send, addr);
2512 }
2513
2514 if (dst.width < BRW_EXECUTE_8)
2515 brw_inst_set_exec_size(devinfo, send, dst.width);
2516
2517 brw_set_dest(p, send, dst);
2518 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2519 brw_inst_set_sfid(devinfo, send, sfid);
2520
2521 return &p->store[setup];
2522 }
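/* Illustrative sketch (hedged, not exact assembly): with an immediate
 * descriptor this emits a single SEND, while a variable descriptor emits
 * roughly
 *
 *    or(1)   a0.0<1>:ud   desc:ud   0x0:ud   { align1 WE_all }
 *    send    dst          payload   a0.0:ud
 *
 * so the caller can OR additional descriptor bits into the returned setup
 * instruction afterwards.
 */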
2523
2524 static struct brw_inst *
2525 brw_send_indirect_surface_message(struct brw_codegen *p,
2526 unsigned sfid,
2527 struct brw_reg dst,
2528 struct brw_reg payload,
2529 struct brw_reg surface,
2530 unsigned message_len,
2531 unsigned response_len,
2532 bool header_present)
2533 {
2534 const struct gen_device_info *devinfo = p->devinfo;
2535 struct brw_inst *insn;
2536
2537 if (surface.file != BRW_IMMEDIATE_VALUE) {
2538 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2539
2540 brw_push_insn_state(p);
2541 brw_set_default_access_mode(p, BRW_ALIGN_1);
2542 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2543 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2544
2545 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2546 * some surface array is accessed out of bounds.
2547 */
2548 insn = brw_AND(p, addr,
2549 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2550 BRW_GET_SWZ(surface.swizzle, 0)),
2551 brw_imm_ud(0xff));
2552
2553 brw_pop_insn_state(p);
2554
2555 surface = addr;
2556 }
2557
2558 insn = brw_send_indirect_message(p, sfid, dst, payload, surface);
2559 brw_inst_set_mlen(devinfo, insn, message_len);
2560 brw_inst_set_rlen(devinfo, insn, response_len);
2561 brw_inst_set_header_present(devinfo, insn, header_present);
2562
2563 return insn;
2564 }
2565
2566 static bool
2567 while_jumps_before_offset(const struct gen_device_info *devinfo,
2568 brw_inst *insn, int while_offset, int start_offset)
2569 {
2570 int scale = 16 / brw_jump_scale(devinfo);
2571 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2572 : brw_inst_jip(devinfo, insn);
2573 assert(jip < 0);
2574 return while_offset + jip * scale <= start_offset;
2575 }
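/* Worked example (a sketch, assuming gen7 where brw_jump_scale() == 2,
 * so scale == 16 / 2 == 8): a WHILE at byte offset 96 with jip == -4
 * jumps to 96 + (-4 * 8) = 64, and therefore "jumps before" any
 * start_offset >= 64.
 */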
2576
2577
2578 static int
2579 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2580 {
2581 int offset;
2582 void *store = p->store;
2583 const struct gen_device_info *devinfo = p->devinfo;
2584
2585 int depth = 0;
2586
2587 for (offset = next_offset(devinfo, store, start_offset);
2588 offset < p->next_insn_offset;
2589 offset = next_offset(devinfo, store, offset)) {
2590 brw_inst *insn = store + offset;
2591
2592 switch (brw_inst_opcode(devinfo, insn)) {
2593 case BRW_OPCODE_IF:
2594 depth++;
2595 break;
2596 case BRW_OPCODE_ENDIF:
2597 if (depth == 0)
2598 return offset;
2599 depth--;
2600 break;
2601 case BRW_OPCODE_WHILE:
2602 /* If the while doesn't jump before our instruction, it's the end
2603 * of a sibling do...while loop. Ignore it.
2604 */
2605 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2606 continue;
2607 /* fallthrough */
2608 case BRW_OPCODE_ELSE:
2609 case BRW_OPCODE_HALT:
2610 if (depth == 0)
2611 return offset;
2612 }
2613 }
2614
2615 return 0;
2616 }
2617
2618 /* There is no DO instruction on gen6, so to find the end of the loop
2619 * we have to see if the loop is jumping back before our start
2620 * instruction.
2621 */
2622 static int
2623 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2624 {
2625 const struct gen_device_info *devinfo = p->devinfo;
2626 int offset;
2627 void *store = p->store;
2628
2629 assert(devinfo->gen >= 6);
2630
2631 /* Always start after the instruction (such as a WHILE) we're trying to fix
2632 * up.
2633 */
2634 for (offset = next_offset(devinfo, store, start_offset);
2635 offset < p->next_insn_offset;
2636 offset = next_offset(devinfo, store, offset)) {
2637 brw_inst *insn = store + offset;
2638
2639 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2640 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2641 return offset;
2642 }
2643 }
2644 assert(!"not reached");
2645 return start_offset;
2646 }
2647
2648 /* After program generation, go back and update the UIP and JIP of
2649 * BREAK, CONT, and HALT instructions to their correct locations.
2650 */
2651 void
2652 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2653 {
2654 const struct gen_device_info *devinfo = p->devinfo;
2655 int offset;
2656 int br = brw_jump_scale(devinfo);
2657 int scale = 16 / br;
2658 void *store = p->store;
2659
2660 if (devinfo->gen < 6)
2661 return;
2662
2663 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2664 brw_inst *insn = store + offset;
2665 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2666
2667 int block_end_offset = brw_find_next_block_end(p, offset);
2668 switch (brw_inst_opcode(devinfo, insn)) {
2669 case BRW_OPCODE_BREAK:
2670 assert(block_end_offset != 0);
2671 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2672 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2673 brw_inst_set_uip(devinfo, insn,
2674 (brw_find_loop_end(p, offset) - offset +
2675 (devinfo->gen == 6 ? 16 : 0)) / scale);
2676 break;
2677 case BRW_OPCODE_CONTINUE:
2678 assert(block_end_offset != 0);
2679 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2680 brw_inst_set_uip(devinfo, insn,
2681 (brw_find_loop_end(p, offset) - offset) / scale);
2682
2683 assert(brw_inst_uip(devinfo, insn) != 0);
2684 assert(brw_inst_jip(devinfo, insn) != 0);
2685 break;
2686
2687 case BRW_OPCODE_ENDIF: {
2688 int32_t jump = (block_end_offset == 0) ?
2689 1 * br : (block_end_offset - offset) / scale;
2690 if (devinfo->gen >= 7)
2691 brw_inst_set_jip(devinfo, insn, jump);
2692 else
2693 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2694 break;
2695 }
2696
2697 case BRW_OPCODE_HALT:
2698 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2699 *
2700 * "In case of the halt instruction not inside any conditional
2701 * code block, the value of <JIP> and <UIP> should be the
2702 * same. In case of the halt instruction inside conditional code
2703 * block, the <UIP> should be the end of the program, and the
2704 * <JIP> should be end of the most inner conditional code block."
2705 *
2706 * The uip will have already been set by whoever set up the
2707 * instruction.
2708 */
2709 if (block_end_offset == 0) {
2710 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2711 } else {
2712 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2713 }
2714 assert(brw_inst_uip(devinfo, insn) != 0);
2715 assert(brw_inst_jip(devinfo, insn) != 0);
2716 break;
2717 }
2718 }
2719 }
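/* Worked example (a sketch, assuming gen7 where scale == 8): a BREAK at
 * byte offset 32 whose enclosing block ends at offset 64, inside a loop
 * whose WHILE sits at offset 80, gets JIP = (64 - 32) / 8 = 4 and
 * UIP = (80 - 32) / 8 = 6. On gen6 the UIP would be computed 16 bytes
 * past the WHILE instead, per the comment in the BREAK case above.
 */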
2720
2721 void brw_ff_sync(struct brw_codegen *p,
2722 struct brw_reg dest,
2723 unsigned msg_reg_nr,
2724 struct brw_reg src0,
2725 bool allocate,
2726 unsigned response_length,
2727 bool eot)
2728 {
2729 const struct gen_device_info *devinfo = p->devinfo;
2730 brw_inst *insn;
2731
2732 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2733
2734 insn = next_insn(p, BRW_OPCODE_SEND);
2735 brw_set_dest(p, insn, dest);
2736 brw_set_src0(p, insn, src0);
2737 brw_set_src1(p, insn, brw_imm_d(0));
2738
2739 if (devinfo->gen < 6)
2740 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2741
2742 brw_set_ff_sync_message(p,
2743 insn,
2744 allocate,
2745 response_length,
2746 eot);
2747 }
2748
2749 /**
2750 * Emit the SEND instruction necessary to generate stream output data on Gen6
2751 * (for transform feedback).
2752 *
2753 * If send_commit_msg is true, this is the last piece of stream output data
2754 * from this thread, so send the data as a committed write. According to the
2755 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2756 *
2757 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2758 * writes are complete by sending the final write as a committed write."
2759 */
2760 void
2761 brw_svb_write(struct brw_codegen *p,
2762 struct brw_reg dest,
2763 unsigned msg_reg_nr,
2764 struct brw_reg src0,
2765 unsigned binding_table_index,
2766 bool send_commit_msg)
2767 {
2768 const struct gen_device_info *devinfo = p->devinfo;
2769 const unsigned target_cache =
2770 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2771 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2772 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2773 brw_inst *insn;
2774
2775 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2776
2777 insn = next_insn(p, BRW_OPCODE_SEND);
2778 brw_set_dest(p, insn, dest);
2779 brw_set_src0(p, insn, src0);
2780 brw_set_src1(p, insn, brw_imm_d(0));
2781 brw_set_dp_write_message(p, insn,
2782 binding_table_index,
2783 0, /* msg_control: ignored */
2784 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2785 target_cache,
2786 1, /* msg_length */
2787 true, /* header_present */
2788 0, /* last_render_target: ignored */
2789 send_commit_msg, /* response_length */
2790 0, /* end_of_thread */
2791 send_commit_msg); /* send_commit_msg */
2792 }
2793
2794 static unsigned
2795 brw_surface_payload_size(struct brw_codegen *p,
2796 unsigned num_channels,
2797 bool has_simd4x2,
2798 bool has_simd16)
2799 {
2800 if (has_simd4x2 &&
2801 brw_inst_access_mode(p->devinfo, p->current) == BRW_ALIGN_16)
2802 return 1;
2803 else if (has_simd16 &&
2804 brw_inst_exec_size(p->devinfo, p->current) == BRW_EXECUTE_16)
2805 return 2 * num_channels;
2806 else
2807 return num_channels;
2808 }
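/* Worked example (a sketch): a SIMD16 untyped read of num_channels == 4
 * returns 2 * 4 = 8 registers, the same read in SIMD8 returns 4, and a
 * SIMD4x2 (Align16) access packs its payload into a single register.
 */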
2809
2810 static void
2811 brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
2812 brw_inst *insn,
2813 unsigned atomic_op,
2814 bool response_expected)
2815 {
2816 const struct gen_device_info *devinfo = p->devinfo;
2817 unsigned msg_control =
2818 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2819 (response_expected ? 1 << 5 : 0); /* Return data expected */
2820
2821 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2822 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2823 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2824 msg_control |= 1 << 4; /* SIMD8 mode */
2825
2826 brw_inst_set_dp_msg_type(devinfo, insn,
2827 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2828 } else {
2829 brw_inst_set_dp_msg_type(devinfo, insn,
2830 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2831 }
2832 } else {
2833 brw_inst_set_dp_msg_type(devinfo, insn,
2834 GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2835
2836 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2837 msg_control |= 1 << 4; /* SIMD8 mode */
2838 }
2839
2840 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2841 }
2842
2843 void
2844 brw_untyped_atomic(struct brw_codegen *p,
2845 struct brw_reg dst,
2846 struct brw_reg payload,
2847 struct brw_reg surface,
2848 unsigned atomic_op,
2849 unsigned msg_length,
2850 bool response_expected)
2851 {
2852 const struct gen_device_info *devinfo = p->devinfo;
2853 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2854 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2855 GEN7_SFID_DATAPORT_DATA_CACHE);
2856 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
2857 /* Mask out unused components -- This is especially important in Align16
2858 * mode on generations that don't have native support for SIMD4x2 atomics,
2859 * because unused but enabled components will cause the dataport to perform
2860 * additional atomic operations on the addresses that happen to be in the
2861 * uninitialized Y, Z and W coordinates of the payload.
2862 */
2863 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2864 struct brw_inst *insn = brw_send_indirect_surface_message(
2865 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
2866 brw_surface_payload_size(p, response_expected,
2867 devinfo->gen >= 8 || devinfo->is_haswell, true),
2868 align1);
2869
2870 brw_set_dp_untyped_atomic_message(
2871 p, insn, atomic_op, response_expected);
2872 }
2873
2874 static void
2875 brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
2876 struct brw_inst *insn,
2877 unsigned num_channels)
2878 {
2879 const struct gen_device_info *devinfo = p->devinfo;
2880 /* Set mask of 32-bit channels to drop. */
2881 unsigned msg_control = 0xf & (0xf << num_channels);
2882
2883 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2884 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
2885 msg_control |= 1 << 4; /* SIMD16 mode */
2886 else
2887 msg_control |= 2 << 4; /* SIMD8 mode */
2888 }
2889
2890 brw_inst_set_dp_msg_type(devinfo, insn,
2891 (devinfo->gen >= 8 || devinfo->is_haswell ?
2892 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2893 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
2894 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2895 }
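/* Worked example (a sketch): for num_channels == 2 the mask above is
 * 0xf & (0xf << 2) = 0xc, i.e. the Z and W channels are dropped and only
 * X and Y are read back.
 */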
2896
2897 void
2898 brw_untyped_surface_read(struct brw_codegen *p,
2899 struct brw_reg dst,
2900 struct brw_reg payload,
2901 struct brw_reg surface,
2902 unsigned msg_length,
2903 unsigned num_channels)
2904 {
2905 const struct gen_device_info *devinfo = p->devinfo;
2906 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2907 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2908 GEN7_SFID_DATAPORT_DATA_CACHE);
2909 struct brw_inst *insn = brw_send_indirect_surface_message(
2910 p, sfid, dst, payload, surface, msg_length,
2911 brw_surface_payload_size(p, num_channels, true, true),
2912 false);
2913
2914 brw_set_dp_untyped_surface_read_message(
2915 p, insn, num_channels);
2916 }
2917
2918 static void
2919 brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
2920 struct brw_inst *insn,
2921 unsigned num_channels)
2922 {
2923 const struct gen_device_info *devinfo = p->devinfo;
2924 /* Set mask of 32-bit channels to drop. */
2925 unsigned msg_control = 0xf & (0xf << num_channels);
2926
2927 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2928 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
2929 msg_control |= 1 << 4; /* SIMD16 mode */
2930 else
2931 msg_control |= 2 << 4; /* SIMD8 mode */
2932 } else {
2933 if (devinfo->gen >= 8 || devinfo->is_haswell)
2934 msg_control |= 0 << 4; /* SIMD4x2 mode */
2935 else
2936 msg_control |= 2 << 4; /* SIMD8 mode */
2937 }
2938
2939 brw_inst_set_dp_msg_type(devinfo, insn,
2940 devinfo->gen >= 8 || devinfo->is_haswell ?
2941 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
2942 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
2943 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2944 }
2945
2946 void
2947 brw_untyped_surface_write(struct brw_codegen *p,
2948 struct brw_reg payload,
2949 struct brw_reg surface,
2950 unsigned msg_length,
2951 unsigned num_channels)
2952 {
2953 const struct gen_device_info *devinfo = p->devinfo;
2954 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2955 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2956 GEN7_SFID_DATAPORT_DATA_CACHE);
2957 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
2958 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2959 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
2960 WRITEMASK_X : WRITEMASK_XYZW;
2961 struct brw_inst *insn = brw_send_indirect_surface_message(
2962 p, sfid, brw_writemask(brw_null_reg(), mask),
2963 payload, surface, msg_length, 0, align1);
2964
2965 brw_set_dp_untyped_surface_write_message(
2966 p, insn, num_channels);
2967 }
2968
2969 static void
2970 brw_set_dp_typed_atomic_message(struct brw_codegen *p,
2971 struct brw_inst *insn,
2972 unsigned atomic_op,
2973 bool response_expected)
2974 {
2975 const struct gen_device_info *devinfo = p->devinfo;
2976 unsigned msg_control =
2977 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2978 (response_expected ? 1 << 5 : 0); /* Return data expected */
2979
2980 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2981 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2982 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
2983 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
2984
2985 brw_inst_set_dp_msg_type(devinfo, insn,
2986 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
2987 } else {
2988 brw_inst_set_dp_msg_type(devinfo, insn,
2989 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
2990 }
2991
2992 } else {
2993 brw_inst_set_dp_msg_type(devinfo, insn,
2994 GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);
2995
2996 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
2997 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
2998 }
2999
3000 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3001 }
3002
3003 void
3004 brw_typed_atomic(struct brw_codegen *p,
3005 struct brw_reg dst,
3006 struct brw_reg payload,
3007 struct brw_reg surface,
3008 unsigned atomic_op,
3009 unsigned msg_length,
3010 bool response_expected) {
3011 const struct gen_device_info *devinfo = p->devinfo;
3012 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3013 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3014 GEN6_SFID_DATAPORT_RENDER_CACHE);
3015 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3016 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3017 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3018 struct brw_inst *insn = brw_send_indirect_surface_message(
3019 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
3020 brw_surface_payload_size(p, response_expected,
3021 devinfo->gen >= 8 || devinfo->is_haswell, false),
3022 true);
3023
3024 brw_set_dp_typed_atomic_message(
3025 p, insn, atomic_op, response_expected);
3026 }
3027
3028 static void
3029 brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
3030 struct brw_inst *insn,
3031 unsigned num_channels)
3032 {
3033 const struct gen_device_info *devinfo = p->devinfo;
3034 /* Set mask of unused channels. */
3035 unsigned msg_control = 0xf & (0xf << num_channels);
3036
3037 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3038 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3039 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3040 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3041 else
3042 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3043 }
3044
3045 brw_inst_set_dp_msg_type(devinfo, insn,
3046 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
3047 } else {
3048 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3049 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3050 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3051 }
3052
3053 brw_inst_set_dp_msg_type(devinfo, insn,
3054 GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
3055 }
3056
3057 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3058 }
3059
3060 void
3061 brw_typed_surface_read(struct brw_codegen *p,
3062 struct brw_reg dst,
3063 struct brw_reg payload,
3064 struct brw_reg surface,
3065 unsigned msg_length,
3066 unsigned num_channels)
3067 {
3068 const struct gen_device_info *devinfo = p->devinfo;
3069 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3070 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3071 GEN6_SFID_DATAPORT_RENDER_CACHE);
3072 struct brw_inst *insn = brw_send_indirect_surface_message(
3073 p, sfid, dst, payload, surface, msg_length,
3074 brw_surface_payload_size(p, num_channels,
3075 devinfo->gen >= 8 || devinfo->is_haswell, false),
3076 true);
3077
3078 brw_set_dp_typed_surface_read_message(
3079 p, insn, num_channels);
3080 }
3081
3082 static void
3083 brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
3084 struct brw_inst *insn,
3085 unsigned num_channels)
3086 {
3087 const struct gen_device_info *devinfo = p->devinfo;
3088 /* Set mask of unused channels. */
3089 unsigned msg_control = 0xf & (0xf << num_channels);
3090
3091 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3092 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3093 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3094 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3095 else
3096 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3097 }
3098
3099 brw_inst_set_dp_msg_type(devinfo, insn,
3100 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);
3101
3102 } else {
3103 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3104 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3105 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3106 }
3107
3108 brw_inst_set_dp_msg_type(devinfo, insn,
3109 GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
3110 }
3111
3112 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3113 }
3114
3115 void
3116 brw_typed_surface_write(struct brw_codegen *p,
3117 struct brw_reg payload,
3118 struct brw_reg surface,
3119 unsigned msg_length,
3120 unsigned num_channels)
3121 {
3122 const struct gen_device_info *devinfo = p->devinfo;
3123 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3124 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3125 GEN6_SFID_DATAPORT_RENDER_CACHE);
3126 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3127 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3128 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3129 WRITEMASK_X : WRITEMASK_XYZW);
3130 struct brw_inst *insn = brw_send_indirect_surface_message(
3131 p, sfid, brw_writemask(brw_null_reg(), mask),
3132 payload, surface, msg_length, 0, true);
3133
3134 brw_set_dp_typed_surface_write_message(
3135 p, insn, num_channels);
3136 }
3137
3138 static void
3139 brw_set_memory_fence_message(struct brw_codegen *p,
3140 struct brw_inst *insn,
3141 enum brw_message_target sfid,
3142 bool commit_enable)
3143 {
3144 const struct gen_device_info *devinfo = p->devinfo;
3145
3146 brw_set_message_descriptor(p, insn, sfid,
3147 1 /* message length */,
3148 (commit_enable ? 1 : 0) /* response length */,
3149 true /* header present */,
3150 false);
3151
3152 switch (sfid) {
3153 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3154 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3155 break;
3156 case GEN7_SFID_DATAPORT_DATA_CACHE:
3157 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3158 break;
3159 default:
3160 unreachable("Not reached");
3161 }
3162
3163 if (commit_enable)
3164 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3165 }
3166
3167 void
3168 brw_memory_fence(struct brw_codegen *p,
3169 struct brw_reg dst)
3170 {
3171 const struct gen_device_info *devinfo = p->devinfo;
3172 const bool commit_enable = devinfo->gen == 7 && !devinfo->is_haswell;
3173 struct brw_inst *insn;
3174
3175 brw_push_insn_state(p);
3176 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3177 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3178 dst = vec1(dst);
3179
3180 /* Set dst as destination for dependency tracking; the MEMORY_FENCE
3181 * message doesn't write anything back.
3182 */
3183 insn = next_insn(p, BRW_OPCODE_SEND);
3184 dst = retype(dst, BRW_REGISTER_TYPE_UW);
3185 brw_set_dest(p, insn, dst);
3186 brw_set_src0(p, insn, dst);
3187 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3188 commit_enable);
3189
3190 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3191 /* IVB does typed surface access through the render cache, so we need to
3192 * flush it too. Use a different register so both flushes can be
3193 * pipelined by the hardware.
3194 */
3195 insn = next_insn(p, BRW_OPCODE_SEND);
3196 brw_set_dest(p, insn, offset(dst, 1));
3197 brw_set_src0(p, insn, offset(dst, 1));
3198 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3199 commit_enable);
3200
3201 /* Now write the response of the second message into the response of the
3202 * first to trigger a pipeline stall -- This way future render and data
3203 * cache messages will be properly ordered with respect to past data and
3204 * render cache messages.
3205 */
3206 brw_MOV(p, dst, offset(dst, 1));
3207 }
3208
3209 brw_pop_insn_state(p);
3210 }
3211
3212 void
3213 brw_pixel_interpolator_query(struct brw_codegen *p,
3214 struct brw_reg dest,
3215 struct brw_reg mrf,
3216 bool noperspective,
3217 unsigned mode,
3218 struct brw_reg data,
3219 unsigned msg_length,
3220 unsigned response_length)
3221 {
3222 const struct gen_device_info *devinfo = p->devinfo;
3223 struct brw_inst *insn;
3224 const uint16_t exec_size = brw_inst_exec_size(devinfo, p->current);
3225
3226 /* brw_send_indirect_message will automatically use a direct send message
3227 * if data is actually immediate.
3228 */
3229 insn = brw_send_indirect_message(p,
3230 GEN7_SFID_PIXEL_INTERPOLATOR,
3231 dest,
3232 mrf,
3233 vec1(data));
3234 brw_inst_set_mlen(devinfo, insn, msg_length);
3235 brw_inst_set_rlen(devinfo, insn, response_length);
3236
3237 brw_inst_set_pi_simd_mode(devinfo, insn, exec_size == BRW_EXECUTE_16);
3238 brw_inst_set_pi_slot_group(devinfo, insn, 0); /* zero unless 32/64px dispatch */
3239 brw_inst_set_pi_nopersp(devinfo, insn, noperspective);
3240 brw_inst_set_pi_message_type(devinfo, insn, mode);
3241 }
3242
3243 void
3244 brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
3245 struct brw_reg mask)
3246 {
3247 const struct gen_device_info *devinfo = p->devinfo;
3248 const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
3249 const unsigned qtr_control = brw_inst_qtr_control(devinfo, p->current);
3250 brw_inst *inst;
3251
3252 assert(devinfo->gen >= 7);
3253 assert(mask.type == BRW_REGISTER_TYPE_UD);
3254
3255 brw_push_insn_state(p);
3256
3257 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3258 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3259
3260 if (devinfo->gen >= 8) {
3261 /* Getting the first active channel index is easy on Gen8: Just find
3262 * the first bit set in the execution mask. The register exists on
3263 * HSW already but it reads back as all ones when the current
3264 * instruction has execution masking disabled, so it's kind of
3265 * useless.
3266 */
3267 struct brw_reg exec_mask =
3268 retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);
3269
3270 if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
3271 /* Unfortunately, ce0 does not take into account the thread
3272 * dispatch mask, which may be a problem in cases where it's not
3273 * tightly packed (i.e. it doesn't have the form '2^n - 1' for
3274 * some n). Combine ce0 with the given dispatch (or vector) mask
3275 * to mask off those channels which were never dispatched by the
3276 * hardware.
3277 */
3278 brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
3279 brw_AND(p, vec1(dst), exec_mask, vec1(dst));
3280 exec_mask = vec1(dst);
3281 }
3282
3283 /* Quarter control has the effect of magically shifting the value of
3284 * ce0 so you'll get the first active channel relative to the
3285 * specified quarter control as the result.
3286 */
3287 inst = brw_FBL(p, vec1(dst), exec_mask);
3288 } else {
3289 const struct brw_reg flag = brw_flag_reg(1, 0);
3290
3291 brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
3292
3293 /* Run enough instructions returning zero with execution masking and
3294 * a conditional modifier enabled in order to get the full execution
3295 * mask in f1.0. We could use a single 32-wide move here, were it
3296 * not for the hardware bug that causes channel enables to
3297 * be applied incorrectly to the second half of 32-wide instructions
3298 * on Gen7.
3299 */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_flag_reg_nr(devinfo, inst, 1);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }

         /* Find the first bit set in the exec_size-wide portion of the flag
          * register that was updated by the last sequence of MOV
          * instructions.
          */
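         /* Illustrative: brw_int_type(exec_size / 8, false) picks UB for
          * SIMD8 and UW for SIMD16, and byte_offset() then steps past one
          * 8-channel group per qtr_control unit, so FBL only sees the flag
          * bits belonging to this quarter.
          */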
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.  Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
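         /* Illustrative: on logic instructions the negate modifier acts as a
          * bitwise NOT, so the AND below computes dst.x = ~mask0 & 1 -- 0 if
          * the first SIMD4x2 channel is enabled, 1 if only the second one
          * survived.
          */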
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination first without and then with execution
          * masking in order to find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
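      /* Illustrative: stride(suboffset(src, i), 0, 1, 0) yields a <0,1,0>
       * scalar region that replicates component i across the execution
       * size, while the align16 variant below broadcasts the whole i-th
       * vec4 instead.
       */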
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         const unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));
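         /* Worked example (illustrative): for a 4-byte type with unit
          * horizontal stride (hstride encoding 1), the SHL above shifts
          * idx left by log2(4) + 1 - 1 = 2, i.e. addr = idx * 4 bytes.
          */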

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, so account for the difference if the
          * source register is above this limit.
          */
         if (offset >= limit)
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
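         /* E.g. (illustrative) offset == 600: the ADD above contributes
          * 600 - 600 % 512 = 512 bytes, and the remaining 88 bytes are
          * carried by the indirect immediate of the MOV below.
          */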

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         brw_MOV(p, dst,
                 retype(brw_vec1_indirect(addr.subnr, offset % limit),
                        src.type));
      } else {
         /* In SIMD4x2 mode the index can be either zero or one, replicate
          * it to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
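         /* Illustrative: idx == 1 makes the MOV.NZ above set f1.0, so the
          * predicated SEL picks the second vec4 (suboffset 4); idx == 0
          * leaves the flag clear and the first vec4 is chosen instead.
          */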
      }
   }

   brw_pop_insn_state(p);
}

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we just want to
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const unsigned sfid = (p->devinfo->gen >= 8 || p->devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(p->devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_src1(p, send, brw_imm_ud(0));
   brw_set_message_descriptor(p, send, sfid, 2, 0, false, false);
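   /* Presumably msg_length == 2 covers the atomic offset in the first
    * payload register and the value to add in the second, while
    * response_length == 0 matches the null destination above: the old
    * value returned by the atomic is simply discarded.
    */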
   brw_inst_set_binding_table_index(p->devinfo, send, surf_index);
   brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);

   brw_pop_insn_state(p);
}


/**
 * Emit the SEND message for a barrier
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());

   brw_set_message_descriptor(p, inst, BRW_SFID_MESSAGE_GATEWAY,
                              1 /* msg_length */,
                              0 /* response_length */,
                              false /* header_present */,
                              false /* end_of_thread */);

   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

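   /* The WAIT instruction blocks this thread until the notification count
    * in n0 becomes non-zero (e.g. once the barrier broadcast from
    * brw_barrier above arrives) and then decrements it, which is why n0
    * serves as both source and destination here.
    */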
   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}