intel/eu: Get rid of the return value of brw_send_indirect_message().
src/intel/compiler/brw_eu_emit.c (mesa.git)
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
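/* A minimal usage sketch (illustrative; the register choices here are made
 * up, not taken from a real caller):
 *
 *    struct brw_reg payload = brw_vec8_grf(2, 0);
 *    gen6_resolve_implied_move(p, &payload, 1);
 *
 * On Gen6+ this emits a force-writemask-all MOV of the payload into m1 and
 * rewrites `payload` to refer to m1; on earlier hardware it is a no-op,
 * since SEND performs the move implicitly.
 */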
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
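   /* Concretely: an MRF such as m4 is remapped below to GRF
    * g(GEN7_MRF_HACK_START + 4), which is intended to land the fake MRFs in
    * the R112-R127 range quoted above.
    */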
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          *    Although Dst.HorzStride is a don't care for Align16, HW needs
          *    this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Even though this is ignored in da16 mode, HW still needs it set to '01'. */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * In platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16.  In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
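      /* Illustrative case: a write to a width-1 destination region under a
       * default SIMD8 execution state would have its exec size narrowed to
       * 1 by the check below.
       */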
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}

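/* Note on the scalar special case in brw_set_src0()/brw_set_src1() below:
 * when a source has width 1 and the instruction's exec size is 1, the
 * region is canonicalized to <0;1,0> regardless of the strides passed in.
 * Illustrative sketch (made-up registers): with an exec size of 1,
 * brw_MOV(p, dst, brw_vec1_grf(3, 2)) encodes its source as g3.2<0;1,0>,
 * broadcasting a single channel.
 */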
void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of using the same region descriptions for
             * align_16 as for align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of using the same region descriptions for
             * align_16 as for align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Specify the descriptor and extended descriptor immediate for a SEND(C)
 * message instruction.
 */
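/* Usage sketch: most emitters in this file go through brw_set_desc() (an
 * inline helper which, we assume here, forwards to brw_set_desc_ex() with
 * an ex_desc of 0):
 *
 *    brw_set_desc(p, inst, brw_message_desc(devinfo, mlen, rlen, header));
 *
 * where mlen/rlen are the message/response lengths in registers and
 * `header` indicates whether a message header is present.
 */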
void
brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
                unsigned desc, unsigned ex_desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst_set_src1_file_type(devinfo, inst,
                               BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_D);
   brw_inst_set_send_desc(devinfo, inst, desc);
   if (devinfo->gen >= 9 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC))
      brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
}

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
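   /* Worked example of the encoding below: on Gen7 the block size field is
    * num_regs - 1, so 1/2/4 registers encode as 0/1/3; on Gen8+ it is
    * log2(num_regs), so 1/2/4/8 registers encode as 0/1/2/3.
    */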
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, mlen, rlen, header_present));

   brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

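/* Note, grounded in the code below and in the comment inside brw_ENDIF():
 * each brw_inst is 16 bytes (hence next_insn_offset advancing by 16), and
 * the store grows by doubling via reralloc().  Since reralloc() may move
 * p->store, pointers into the store must be re-derived after this call.
 */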
#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.nr < 128);
   assert(src1.nr < 128);
   assert(src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD);
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types.  The MAD and LRP emitters ensure
          * that all four types are float.  The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                                \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0)                         \
{                                                               \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);             \
}

#define ALU2(OP)                                                \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1)                         \
{                                                               \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);       \
}

#define ALU3(OP)                                                \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

#define ALU3F(OP)                                               \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                   \
          dest.type == BRW_REGISTER_TYPE_DF);                   \
   if (dest.type == BRW_REGISTER_TYPE_F) {                      \
      assert(src0.type == BRW_REGISTER_TYPE_F);                 \
      assert(src1.type == BRW_REGISTER_TYPE_F);                 \
      assert(src2.type == BRW_REGISTER_TYPE_F);                 \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {              \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                \
   }                                                            \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
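/* Sketch of the pre-gen6 expansion (grounded in the macro below): a call
 * like brw_RNDZ(p, dst, src) emits
 *
 *    rndz.r  dst  src          (sets the per-channel increment bits in f0)
 *    (+f0) add  dst  dst  1.0F
 *
 * while on gen6+ only the first instruction is emitted.
 */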
#define ROUND(OP)                                                    \
void brw_##OP(struct brw_codegen *p,                                 \
              struct brw_reg dest,                                   \
              struct brw_reg src)                                    \
{                                                                    \
   const struct gen_device_info *devinfo = p->devinfo;               \
   brw_inst *rnd, *add;                                              \
   rnd = next_insn(p, BRW_OPCODE_##OP);                              \
   brw_set_dest(p, rnd, dest);                                       \
   brw_set_src0(p, rnd, src);                                        \
                                                                     \
   if (devinfo->gen < 6) {                                           \
      /* turn on round-increments */                                 \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R);   \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));                 \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
   }                                                                 \
}


ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
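   /* Illustrative effect: with floats f0 f1 f2 f3 in the source, a <1,2,0>
    * region presents the channel sequence f0 f0 f1 f1 f2 f2 f3 f3, so each
    * value still appears in the even channels the conversion actually reads.
    */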
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
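   /* In other words (a restatement of the code below): when the destination
    * is UD and the hardware won't zero the high bits for us, the dst is
    * retyped to W with a stride of 2 so the half-float result lands in the
    * low word, and a second MOV explicitly writes 0 to the high word.
    */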
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   Because this instruction does not have a 16-bit floating-point
       *   type, the source data type must be Word (W). The destination type
       *   must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}




/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, eg. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);
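   /* A note on the scaling (an assumption based on brw_jump_scale(), which
    * lives elsewhere): jump targets are counted in different units per gen -
    * whole instructions on Gen4, 64-bit compaction chunks (two per full
    * instruction) on Gen5-7, and bytes (16 per instruction) on Gen8+ - so
    * `br` converts instruction distances into the hardware's unit.
    */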

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP.  On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /*
    * A single next_insn() may change the base address of the instruction
    * store (p->store), so call it first, before turning any stored index
    * back into a pointer into the store.
    */
   if (emit_endif)
      insn = next_insn(p, BRW_OPCODE_ENDIF);

   /* Pop the IF and (optional) ELSE instructions from the stack */
   p->if_depth_in_loop[p->loop_stack_depth]--;
   tmp = pop_if_stack(p);
   if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
      else_inst = tmp;
      tmp = pop_if_stack(p);
   }
   if_inst = tmp;

   if (!emit_endif) {
      /* ENDIF is useless; don't bother emitting it. */
      convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
      return;
   }

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
   } else {
      brw_set_src0(p, insn, brw_imm_d(0));
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   /* Also pop item off the stack in the endif instruction: */
   if (devinfo->gen < 6) {
      brw_inst_set_gen4_jump_count(devinfo, insn, 0);
      brw_inst_set_gen4_pop_count(devinfo, insn, 1);
   } else if (devinfo->gen == 6) {
      brw_inst_set_gen6_jump_count(devinfo, insn, 2);
   } else {
      brw_inst_set_jip(devinfo, insn, 2);
   }
   patch_IF_ELSE(p, if_inst, else_inst, insn);
}

brw_inst *
brw_BREAK(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_BREAK);
   if (devinfo->gen >= 8) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen >= 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));

   return insn;
}

brw_inst *
brw_CONT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_CONTINUE);
   brw_set_dest(p, insn, brw_ip_reg());
   if (devinfo->gen >= 8) {
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   }

   if (devinfo->gen < 6) {
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   return insn;
}

brw_inst *
gen6_HALT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_HALT);
   brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   if (devinfo->gen >= 8) {
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   return insn;
}

1606 /* DO/WHILE loop:
1607 *
1608 * The DO/WHILE is just an unterminated loop -- break or continue are
1609  * used for control within the loop. There are a few ways it can be
1610  * implemented.
1611 *
1612 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1613 * jip and no DO instruction.
1614 *
1615 * For non-uniform control flow pre-gen6, there's a DO instruction to
1616 * push the mask, and a WHILE to jump back, and BREAK to get out and
1617 * pop the mask.
1618 *
1619 * For gen6, there's no more mask stack, so no need for DO. WHILE
1620 * just points back to the first instruction of the loop.
1621 */
1622 brw_inst *
1623 brw_DO(struct brw_codegen *p, unsigned execute_size)
1624 {
1625 const struct gen_device_info *devinfo = p->devinfo;
1626
1627 if (devinfo->gen >= 6 || p->single_program_flow) {
1628 push_loop_stack(p, &p->store[p->nr_insn]);
1629 return &p->store[p->nr_insn];
1630 } else {
1631 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1632
1633 push_loop_stack(p, insn);
1634
1635 /* Override the defaults for this instruction:
1636 */
1637 brw_set_dest(p, insn, brw_null_reg());
1638 brw_set_src0(p, insn, brw_null_reg());
1639 brw_set_src1(p, insn, brw_null_reg());
1640
1641 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1642 brw_inst_set_exec_size(devinfo, insn, execute_size);
1643 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1644
1645 return insn;
1646 }
1647 }
1648
1649 /**
1650 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1651 * instruction here.
1652 *
1653 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1654 * nesting, since it can always just point to the end of the block/current loop.
1655 */
1656 static void
1657 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1658 {
1659 const struct gen_device_info *devinfo = p->devinfo;
1660 brw_inst *do_inst = get_inner_do_insn(p);
1661 brw_inst *inst;
1662 unsigned br = brw_jump_scale(devinfo);
1663
1664 assert(devinfo->gen < 6);
1665
1666 for (inst = while_inst - 1; inst != do_inst; inst--) {
1667       /* A nonzero jump count means this instruction has already been
1668        * patched, because it's part of a loop nested inside the one we're
1669        * patching.
1670 */
1671 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1672 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1673 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1674 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1675 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1676 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1677 }
1678 }
1679 }
1680
1681 brw_inst *
1682 brw_WHILE(struct brw_codegen *p)
1683 {
1684 const struct gen_device_info *devinfo = p->devinfo;
1685 brw_inst *insn, *do_insn;
1686 unsigned br = brw_jump_scale(devinfo);
1687
1688 if (devinfo->gen >= 6) {
1689 insn = next_insn(p, BRW_OPCODE_WHILE);
1690 do_insn = get_inner_do_insn(p);
1691
1692 if (devinfo->gen >= 8) {
1693 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1694 brw_set_src0(p, insn, brw_imm_d(0));
1695 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1696 } else if (devinfo->gen == 7) {
1697 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1698 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1699 brw_set_src1(p, insn, brw_imm_w(0));
1700 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1701 } else {
1702 brw_set_dest(p, insn, brw_imm_w(0));
1703 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1704 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1705 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1706 }
1707
1708 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1709
1710 } else {
1711 if (p->single_program_flow) {
1712 insn = next_insn(p, BRW_OPCODE_ADD);
1713 do_insn = get_inner_do_insn(p);
1714
1715 brw_set_dest(p, insn, brw_ip_reg());
1716 brw_set_src0(p, insn, brw_ip_reg());
1717 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1718 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1719 } else {
1720 insn = next_insn(p, BRW_OPCODE_WHILE);
1721 do_insn = get_inner_do_insn(p);
1722
1723 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1724
1725 brw_set_dest(p, insn, brw_ip_reg());
1726 brw_set_src0(p, insn, brw_ip_reg());
1727 brw_set_src1(p, insn, brw_imm_d(0));
1728
1729 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1730 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1731 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1732
1733 brw_patch_break_cont(p, insn);
1734 }
1735 }
1736 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1737
1738 p->loop_stack_depth--;
1739
1740 return insn;
1741 }
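
/* A minimal sketch of loop emission (hypothetical caller): brw_DO and
 * brw_WHILE bracket the body, and on gen6+ the BREAK/CONT jump targets
 * are filled in afterwards by brw_set_uip_jip():
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *    ... emit the loop body, possibly with brw_BREAK(p) / brw_CONT(p) ...
 *    brw_WHILE(p);
 *    ... once codegen is done: brw_set_uip_jip(p, start_offset) ...
 */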
1742
1743 /* FORWARD JUMPS:
1744 */
1745 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1746 {
1747 const struct gen_device_info *devinfo = p->devinfo;
1748 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1749 unsigned jmpi = 1;
1750
1751 if (devinfo->gen >= 5)
1752 jmpi = 2;
1753
1754 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1755 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1756
1757 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1758 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1759 }
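
/* Sketch of the expected usage (hypothetical caller): record the index of
 * the JMPI, emit the instructions to be skipped, then patch the distance:
 *
 *    int jmp_idx = p->nr_insn;
 *    brw_JMPI(p, brw_imm_d(0), BRW_PREDICATE_NORMAL);
 *    ... emit the instructions to be jumped over ...
 *    brw_land_fwd_jump(p, jmp_idx);
 */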
1760
1761 /* To integrate with the above, it makes sense that the comparison
1762 * instruction should populate the flag register. It might be simpler
1763 * just to use the flag reg for most WM tasks?
1764 */
1765 void brw_CMP(struct brw_codegen *p,
1766 struct brw_reg dest,
1767 unsigned conditional,
1768 struct brw_reg src0,
1769 struct brw_reg src1)
1770 {
1771 const struct gen_device_info *devinfo = p->devinfo;
1772 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1773
1774 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1775 brw_set_dest(p, insn, dest);
1776 brw_set_src0(p, insn, src0);
1777 brw_set_src1(p, insn, src1);
1778
1779 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1780 * page says:
1781 * "Any CMP instruction with a null destination must use a {switch}."
1782 *
1783 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1784 * mentioned on their work-arounds pages.
1785 */
1786 if (devinfo->gen == 7) {
1787 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1788 dest.nr == BRW_ARF_NULL) {
1789 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1790 }
1791 }
1792 }
1793
1794 /***********************************************************************
1795 * Helpers for the various SEND message types:
1796 */
1797
1798 /** Extended math function, float[8].
1799 */
1800 void gen4_math(struct brw_codegen *p,
1801 struct brw_reg dest,
1802 unsigned function,
1803 unsigned msg_reg_nr,
1804 struct brw_reg src,
1805 unsigned precision )
1806 {
1807 const struct gen_device_info *devinfo = p->devinfo;
1808 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1809 unsigned data_type;
1810 if (has_scalar_region(src)) {
1811 data_type = BRW_MATH_DATA_SCALAR;
1812 } else {
1813 data_type = BRW_MATH_DATA_VECTOR;
1814 }
1815
1816 assert(devinfo->gen < 6);
1817
1818 /* Example code doesn't set predicate_control for send
1819 * instructions.
1820 */
1821 brw_inst_set_pred_control(devinfo, insn, 0);
1822 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1823
1824 brw_set_dest(p, insn, dest);
1825 brw_set_src0(p, insn, src);
1826 brw_set_math_message(p,
1827 insn,
1828 function,
1829 src.type == BRW_REGISTER_TYPE_D,
1830 precision,
1831 data_type);
1832 }
1833
1834 void gen6_math(struct brw_codegen *p,
1835 struct brw_reg dest,
1836 unsigned function,
1837 struct brw_reg src0,
1838 struct brw_reg src1)
1839 {
1840 const struct gen_device_info *devinfo = p->devinfo;
1841 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1842
1843 assert(devinfo->gen >= 6);
1844
1845 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1846 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1847
1848 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1849 if (devinfo->gen == 6) {
1850 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1851 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1852 }
1853
1854 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1855 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1856 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1857 assert(src0.type != BRW_REGISTER_TYPE_F);
1858 assert(src1.type != BRW_REGISTER_TYPE_F);
1859 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1860 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1861 } else {
1862 assert(src0.type == BRW_REGISTER_TYPE_F);
1863 assert(src1.type == BRW_REGISTER_TYPE_F);
1864 }
1865
1866 /* Source modifiers are ignored for extended math instructions on Gen6. */
1867 if (devinfo->gen == 6) {
1868 assert(!src0.negate);
1869 assert(!src0.abs);
1870 assert(!src1.negate);
1871 assert(!src1.abs);
1872 }
1873
1874 brw_inst_set_math_function(devinfo, insn, function);
1875
1876 brw_set_dest(p, insn, dest);
1877 brw_set_src0(p, insn, src0);
1878 brw_set_src1(p, insn, src1);
1879 }
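
/* For instance (hypothetical operands), a reciprocal of g2 written to g4;
 * brw_vec8_grf() and brw_null_reg() yield float-typed regions, satisfying
 * the type asserts above:
 *
 *    gen6_math(p, brw_vec8_grf(4, 0), BRW_MATH_FUNCTION_INV,
 *              brw_vec8_grf(2, 0), brw_null_reg());
 */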
1880
1881 /**
1882 * Return the right surface index to access the thread scratch space using
1883 * stateless dataport messages.
1884 */
1885 unsigned
1886 brw_scratch_surface_idx(const struct brw_codegen *p)
1887 {
1888 /* The scratch space is thread-local so IA coherency is unnecessary. */
1889 if (p->devinfo->gen >= 8)
1890 return GEN8_BTI_STATELESS_NON_COHERENT;
1891 else
1892 return BRW_BTI_STATELESS;
1893 }
1894
1895 /**
1896  * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1897 * using a constant offset per channel.
1898 *
1899 * The offset must be aligned to oword size (16 bytes). Used for
1900 * register spilling.
1901 */
1902 void brw_oword_block_write_scratch(struct brw_codegen *p,
1903 struct brw_reg mrf,
1904 int num_regs,
1905 unsigned offset)
1906 {
1907 const struct gen_device_info *devinfo = p->devinfo;
1908 const unsigned target_cache =
1909 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1910 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1911 BRW_SFID_DATAPORT_WRITE);
1912 uint32_t msg_type;
1913
1914 if (devinfo->gen >= 6)
1915 offset /= 16;
1916
1917 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1918
1919 const unsigned mlen = 1 + num_regs;
1920
1921 /* Set up the message header. This is g0, with g0.2 filled with
1922 * the offset. We don't want to leave our offset around in g0 or
1923 * it'll screw up texture samples, so set it up inside the message
1924 * reg.
1925 */
1926 {
1927 brw_push_insn_state(p);
1928 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1929 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1930 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1931
1932 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1933
1934 /* set message header global offset field (reg 0, element 2) */
1935 brw_set_default_exec_size(p, BRW_EXECUTE_1);
1936 brw_MOV(p,
1937 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
1938 mrf.nr,
1939 2), BRW_REGISTER_TYPE_UD),
1940 brw_imm_ud(offset));
1941
1942 brw_pop_insn_state(p);
1943 }
1944
1945 {
1946 struct brw_reg dest;
1947 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1948 int send_commit_msg;
1949 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
1950 BRW_REGISTER_TYPE_UW);
1951
1952 brw_inst_set_sfid(devinfo, insn, target_cache);
1953 brw_inst_set_compression(devinfo, insn, false);
1954
1955 if (brw_inst_exec_size(devinfo, insn) >= 16)
1956 src_header = vec16(src_header);
1957
1958 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1959 if (devinfo->gen < 6)
1960 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
1961
1962 /* Until gen6, writes followed by reads from the same location
1963 * are not guaranteed to be ordered unless write_commit is set.
1964 * If set, then a no-op write is issued to the destination
1965 * register to set a dependency, and a read from the destination
1966 * can be used to ensure the ordering.
1967 *
1968 * For gen6, only writes between different threads need ordering
1969 * protection. Our use of DP writes is all about register
1970 * spilling within a thread.
1971 */
1972 if (devinfo->gen >= 6) {
1973 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
1974 send_commit_msg = 0;
1975 } else {
1976 dest = src_header;
1977 send_commit_msg = 1;
1978 }
1979
1980 brw_set_dest(p, insn, dest);
1981 if (devinfo->gen >= 6) {
1982 brw_set_src0(p, insn, mrf);
1983 } else {
1984 brw_set_src0(p, insn, brw_null_reg());
1985 }
1986
1987 if (devinfo->gen >= 6)
1988 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
1989 else
1990 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
1991
1992 brw_set_desc(p, insn,
1993 brw_message_desc(devinfo, mlen, send_commit_msg, true) |
1994 brw_dp_write_desc(devinfo, brw_scratch_surface_idx(p),
1995 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
1996 msg_type, 0, /* not a render target */
1997 send_commit_msg));
1998 }
1999 }
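
/* Example (hypothetical operands): spill two GRFs to scratch byte offset
 * 64 (oword-aligned), staging the header and data through m1:
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(1), 2, 64);
 */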
2000
2001
2002 /**
2003 * Read a block of owords (half a GRF each) from the scratch buffer
2004 * using a constant index per channel.
2005 *
2006 * Offset must be aligned to oword size (16 bytes). Used for register
2007 * spilling.
2008 */
2009 void
2010 brw_oword_block_read_scratch(struct brw_codegen *p,
2011 struct brw_reg dest,
2012 struct brw_reg mrf,
2013 int num_regs,
2014 unsigned offset)
2015 {
2016 const struct gen_device_info *devinfo = p->devinfo;
2017
2018 if (devinfo->gen >= 6)
2019 offset /= 16;
2020
2021 if (p->devinfo->gen >= 7) {
2022 /* On gen 7 and above, we no longer have message registers and we can
2023 * send from any register we want. By using the destination register
2024 * for the message, we guarantee that the implied message write won't
2025 * accidentally overwrite anything. This has been a problem because
2026 * the MRF registers and source for the final FB write are both fixed
2027 * and may overlap.
2028 */
2029 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2030 } else {
2031 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2032 }
2033 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2034
2035 const unsigned rlen = num_regs;
2036 const unsigned target_cache =
2037 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2038 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2039 BRW_SFID_DATAPORT_READ);
2040
2041 {
2042 brw_push_insn_state(p);
2043 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2044 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2045 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2046
2047 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2048
2049 /* set message header global offset field (reg 0, element 2) */
2050 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2051 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2052
2053 brw_pop_insn_state(p);
2054 }
2055
2056 {
2057 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2058
2059 brw_inst_set_sfid(devinfo, insn, target_cache);
2060 assert(brw_inst_pred_control(devinfo, insn) == 0);
2061 brw_inst_set_compression(devinfo, insn, false);
2062
2063 brw_set_dest(p, insn, dest); /* UW? */
2064 if (devinfo->gen >= 6) {
2065 brw_set_src0(p, insn, mrf);
2066 } else {
2067 brw_set_src0(p, insn, brw_null_reg());
2068 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2069 }
2070
2071 brw_set_desc(p, insn,
2072 brw_message_desc(devinfo, 1, rlen, true) |
2073 brw_dp_read_desc(devinfo, brw_scratch_surface_idx(p),
2074 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2075 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2076 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2077 }
2078 }
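
/* Example (hypothetical operands): the matching unspill of two GRFs from
 * scratch byte offset 64 into g4:
 *
 *    brw_oword_block_read_scratch(p, brw_vec8_grf(4, 0),
 *                                 brw_message_reg(1), 2, 64);
 */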
2079
2080 void
2081 gen7_block_read_scratch(struct brw_codegen *p,
2082 struct brw_reg dest,
2083 int num_regs,
2084 unsigned offset)
2085 {
2086 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2087 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2088
2089 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2090
2091 /* The HW requires that the header is present; this is to get the g0.5
2092 * scratch offset.
2093 */
2094 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2095
2096 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2097 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2098 * is 32 bytes, which happens to be the size of a register.
2099 */
2100 offset /= REG_SIZE;
2101 assert(offset < (1 << 12));
2102
2103 gen7_set_dp_scratch_message(p, insn,
2104 false, /* scratch read */
2105 false, /* OWords */
2106 false, /* invalidate after read */
2107 num_regs,
2108 offset,
2109 1, /* mlen: just g0 */
2110 num_regs, /* rlen */
2111 true); /* header present */
2112 }
2113
2114 /**
2115 * Read float[4] vectors from the data port constant cache.
2116 * Location (in buffer) should be a multiple of 16.
2117 * Used for fetching shader constants.
2118 */
2119 void brw_oword_block_read(struct brw_codegen *p,
2120 struct brw_reg dest,
2121 struct brw_reg mrf,
2122 uint32_t offset,
2123 uint32_t bind_table_index)
2124 {
2125 const struct gen_device_info *devinfo = p->devinfo;
2126 const unsigned target_cache =
2127 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2128 BRW_SFID_DATAPORT_READ);
2129 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2130
2131 /* On newer hardware, offset is in units of owords. */
2132 if (devinfo->gen >= 6)
2133 offset /= 16;
2134
2135 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2136
2137 brw_push_insn_state(p);
2138 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2139 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2140 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2141
2142 brw_push_insn_state(p);
2143 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2144 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2145
2146 /* set message header global offset field (reg 0, element 2) */
2147 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2148 brw_MOV(p,
2149 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2150 mrf.nr,
2151 2), BRW_REGISTER_TYPE_UD),
2152 brw_imm_ud(offset));
2153 brw_pop_insn_state(p);
2154
2155 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2156
2157 brw_inst_set_sfid(devinfo, insn, target_cache);
2158
2159 /* cast dest to a uword[8] vector */
2160 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2161
2162 brw_set_dest(p, insn, dest);
2163 if (devinfo->gen >= 6) {
2164 brw_set_src0(p, insn, mrf);
2165 } else {
2166 brw_set_src0(p, insn, brw_null_reg());
2167 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2168 }
2169
2170 brw_set_desc(p, insn,
2171 brw_message_desc(devinfo, 1, DIV_ROUND_UP(exec_size, 8), true) |
2172 brw_dp_read_desc(devinfo, bind_table_index,
2173 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2174 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2175 BRW_DATAPORT_READ_TARGET_DATA_CACHE));
2176
2177 brw_pop_insn_state(p);
2178 }
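
/* Example (hypothetical operands): fetch one vec4 of shader constants
 * from binding table entry 3 at buffer offset 32 into g4:
 *
 *    brw_oword_block_read(p, brw_vec4_grf(4, 0), brw_message_reg(1), 32, 3);
 */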
2179
2180 brw_inst *
2181 brw_fb_WRITE(struct brw_codegen *p,
2182 struct brw_reg payload,
2183 struct brw_reg implied_header,
2184 unsigned msg_control,
2185 unsigned binding_table_index,
2186 unsigned msg_length,
2187 unsigned response_length,
2188 bool eot,
2189 bool last_render_target,
2190 bool header_present)
2191 {
2192 const struct gen_device_info *devinfo = p->devinfo;
2193 const unsigned target_cache =
2194 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2195 BRW_SFID_DATAPORT_WRITE);
2196 brw_inst *insn;
2197 unsigned msg_type;
2198 struct brw_reg dest, src0;
2199
2200 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2201 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2202 else
2203 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2204
2205 if (devinfo->gen >= 6) {
2206 insn = next_insn(p, BRW_OPCODE_SENDC);
2207 } else {
2208 insn = next_insn(p, BRW_OPCODE_SEND);
2209 }
2210 brw_inst_set_sfid(devinfo, insn, target_cache);
2211 brw_inst_set_compression(devinfo, insn, false);
2212
2213 if (devinfo->gen >= 6) {
2214 /* headerless version, just submit color payload */
2215 src0 = payload;
2216
2217 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2218 } else {
2219 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2220 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2221 src0 = implied_header;
2222
2223 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2224 }
2225
2226 brw_set_dest(p, insn, dest);
2227 brw_set_src0(p, insn, src0);
2228 brw_set_desc(p, insn,
2229 brw_message_desc(devinfo, msg_length, response_length,
2230 header_present) |
2231 brw_dp_write_desc(devinfo, binding_table_index, msg_control,
2232 msg_type, last_render_target,
2233 0 /* send_commit_msg */));
2234 brw_inst_set_eot(devinfo, insn, eot);
2235
2236 return insn;
2237 }
2238
2239 brw_inst *
2240 gen9_fb_READ(struct brw_codegen *p,
2241 struct brw_reg dst,
2242 struct brw_reg payload,
2243 unsigned binding_table_index,
2244 unsigned msg_length,
2245 unsigned response_length,
2246 bool per_sample)
2247 {
2248 const struct gen_device_info *devinfo = p->devinfo;
2249 assert(devinfo->gen >= 9);
2250 const unsigned msg_subtype =
2251 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2252 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2253
2254 brw_inst_set_sfid(devinfo, insn, GEN6_SFID_DATAPORT_RENDER_CACHE);
2255 brw_set_dest(p, insn, dst);
2256 brw_set_src0(p, insn, payload);
2257 brw_set_desc(
2258 p, insn,
2259 brw_message_desc(devinfo, msg_length, response_length, true) |
2260 brw_dp_read_desc(devinfo, binding_table_index,
2261 per_sample << 5 | msg_subtype,
2262 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2263 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2264 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2265
2266 return insn;
2267 }
2268
2269 /**
2270 * Texture sample instruction.
2271 * Note: the msg_type plus msg_length values determine exactly what kind
2272 * of sampling operation is performed. See volume 4, page 161 of docs.
2273 */
2274 void brw_SAMPLE(struct brw_codegen *p,
2275 struct brw_reg dest,
2276 unsigned msg_reg_nr,
2277 struct brw_reg src0,
2278 unsigned binding_table_index,
2279 unsigned sampler,
2280 unsigned msg_type,
2281 unsigned response_length,
2282 unsigned msg_length,
2283 unsigned header_present,
2284 unsigned simd_mode,
2285 unsigned return_format)
2286 {
2287 const struct gen_device_info *devinfo = p->devinfo;
2288 brw_inst *insn;
2289
2290 if (msg_reg_nr != -1)
2291 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2292
2293 insn = next_insn(p, BRW_OPCODE_SEND);
2294 brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
2295 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2296
2297 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2298 *
2299 * "Instruction compression is not allowed for this instruction (that
2300 * is, send). The hardware behavior is undefined if this instruction is
2301 * set as compressed. However, compress control can be set to "SecHalf"
2302 * to affect the EMask generation."
2303 *
2304 * No similar wording is found in later PRMs, but there are examples
2305 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2306 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2307 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2308 */
2309 brw_inst_set_compression(devinfo, insn, false);
2310
2311 if (devinfo->gen < 6)
2312 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2313
2314 brw_set_dest(p, insn, dest);
2315 brw_set_src0(p, insn, src0);
2316 brw_set_desc(p, insn,
2317 brw_message_desc(devinfo, msg_length, response_length,
2318 header_present) |
2319 brw_sampler_desc(devinfo, binding_table_index, sampler,
2320 msg_type, simd_mode, return_format));
2321 }
2322
2323 /* Adjust the message header's sampler state pointer to
2324 * select the correct group of 16 samplers.
2325 */
2326 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2327 struct brw_reg header,
2328 struct brw_reg sampler_index)
2329 {
2330 /* The "Sampler Index" field can only store values between 0 and 15.
2331 * However, we can add an offset to the "Sampler State Pointer"
2332 * field, effectively selecting a different set of 16 samplers.
2333 *
2334 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2335     * offset, and each sampler state is only 16 bytes, so we can't
2336 * exclusively use the offset - we have to use both.
2337 */
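
   /* Worked example: an immediate sampler index of 20 makes the ADD below
    * advance the Sampler State Pointer by 16 * (20 / 16) * 16 = 256 bytes,
    * i.e. past exactly one group of 16 sampler states.
    */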
2338
2339 const struct gen_device_info *devinfo = p->devinfo;
2340
2341 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2342 const int sampler_state_size = 16; /* 16 bytes */
2343 uint32_t sampler = sampler_index.ud;
2344
2345 if (sampler >= 16) {
2346 assert(devinfo->is_haswell || devinfo->gen >= 8);
2347 brw_ADD(p,
2348 get_element_ud(header, 3),
2349 get_element_ud(brw_vec8_grf(0, 0), 3),
2350 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2351 }
2352 } else {
2353 /* Non-const sampler array indexing case */
2354 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2355 return;
2356 }
2357
2358 struct brw_reg temp = get_element_ud(header, 3);
2359
2360 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2361 brw_SHL(p, temp, temp, brw_imm_ud(4));
2362 brw_ADD(p,
2363 get_element_ud(header, 3),
2364 get_element_ud(brw_vec8_grf(0, 0), 3),
2365 temp);
2366 }
2367 }
2368
2369 /* All these variables are pretty confusing - we might be better off
2370 * using bitmasks and macros for this, in the old style. Or perhaps
2371 * just having the caller instantiate the fields in dword3 itself.
2372 */
2373 void brw_urb_WRITE(struct brw_codegen *p,
2374 struct brw_reg dest,
2375 unsigned msg_reg_nr,
2376 struct brw_reg src0,
2377 enum brw_urb_write_flags flags,
2378 unsigned msg_length,
2379 unsigned response_length,
2380 unsigned offset,
2381 unsigned swizzle)
2382 {
2383 const struct gen_device_info *devinfo = p->devinfo;
2384 brw_inst *insn;
2385
2386 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2387
2388 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2389 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2390 brw_push_insn_state(p);
2391 brw_set_default_access_mode(p, BRW_ALIGN_1);
2392 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2393 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2394 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2395 BRW_REGISTER_TYPE_UD),
2396 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2397 brw_imm_ud(0xff00));
2398 brw_pop_insn_state(p);
2399 }
2400
2401 insn = next_insn(p, BRW_OPCODE_SEND);
2402
2403 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2404
2405 brw_set_dest(p, insn, dest);
2406 brw_set_src0(p, insn, src0);
2407 brw_set_src1(p, insn, brw_imm_d(0));
2408
2409 if (devinfo->gen < 6)
2410 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2411
2412 brw_set_urb_message(p,
2413 insn,
2414 flags,
2415 msg_length,
2416 response_length,
2417 offset,
2418 swizzle);
2419 }
2420
2421 void
2422 brw_send_indirect_message(struct brw_codegen *p,
2423 unsigned sfid,
2424 struct brw_reg dst,
2425 struct brw_reg payload,
2426 struct brw_reg desc,
2427 unsigned desc_imm)
2428 {
2429 const struct gen_device_info *devinfo = p->devinfo;
2430 struct brw_inst *send;
2431
2432 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2433
2434 assert(desc.type == BRW_REGISTER_TYPE_UD);
2435
2436 if (desc.file == BRW_IMMEDIATE_VALUE) {
2437 send = next_insn(p, BRW_OPCODE_SEND);
2438 brw_set_desc(p, send, desc.ud | desc_imm);
2439
2440 } else {
2441 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2442
2443 brw_push_insn_state(p);
2444 brw_set_default_access_mode(p, BRW_ALIGN_1);
2445 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2446 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2447 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2448
2449 /* Load the indirect descriptor to an address register using OR so the
2450 * caller can specify additional descriptor bits with the desc_imm
2451 * immediate.
2452 */
2453 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2454
2455 brw_pop_insn_state(p);
2456
2457 send = next_insn(p, BRW_OPCODE_SEND);
2458 brw_set_src1(p, send, addr);
2459 }
2460
2461 if (dst.width < BRW_EXECUTE_8)
2462 brw_inst_set_exec_size(devinfo, send, dst.width);
2463
2464 brw_set_dest(p, send, dst);
2465 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2466 brw_inst_set_sfid(devinfo, send, sfid);
2467 }
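
/* Sketch of a call with a descriptor computed at run time (hypothetical
 * caller; desc_ud, mlen and rlen are assumed names): the helper ORs the
 * immediate part into a0.0 and the SEND reads its descriptor from there:
 *
 *    brw_send_indirect_message(p, GEN7_SFID_DATAPORT_DATA_CACHE, dst,
 *                              payload, retype(desc_ud, BRW_REGISTER_TYPE_UD),
 *                              brw_message_desc(devinfo, mlen, rlen, true));
 */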
2468
2469 static void
2470 brw_send_indirect_surface_message(struct brw_codegen *p,
2471 unsigned sfid,
2472 struct brw_reg dst,
2473 struct brw_reg payload,
2474 struct brw_reg surface,
2475 unsigned desc_imm)
2476 {
2477 if (surface.file != BRW_IMMEDIATE_VALUE) {
2478 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2479
2480 brw_push_insn_state(p);
2481 brw_set_default_access_mode(p, BRW_ALIGN_1);
2482 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2483 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2484 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2485
2486 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2487 * some surface array is accessed out of bounds.
2488 */
2489 brw_AND(p, addr,
2490 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2491 BRW_GET_SWZ(surface.swizzle, 0)),
2492 brw_imm_ud(0xff));
2493
2494 brw_pop_insn_state(p);
2495
2496 surface = addr;
2497 }
2498
2499 brw_send_indirect_message(p, sfid, dst, payload, surface, desc_imm);
2500 }
2501
2502 static bool
2503 while_jumps_before_offset(const struct gen_device_info *devinfo,
2504 brw_inst *insn, int while_offset, int start_offset)
2505 {
2506 int scale = 16 / brw_jump_scale(devinfo);
2507 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2508 : brw_inst_jip(devinfo, insn);
2509 assert(jip < 0);
2510 return while_offset + jip * scale <= start_offset;
2511 }
2512
2513
2514 static int
2515 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2516 {
2517 int offset;
2518 void *store = p->store;
2519 const struct gen_device_info *devinfo = p->devinfo;
2520
2521 int depth = 0;
2522
2523 for (offset = next_offset(devinfo, store, start_offset);
2524 offset < p->next_insn_offset;
2525 offset = next_offset(devinfo, store, offset)) {
2526 brw_inst *insn = store + offset;
2527
2528 switch (brw_inst_opcode(devinfo, insn)) {
2529 case BRW_OPCODE_IF:
2530 depth++;
2531 break;
2532 case BRW_OPCODE_ENDIF:
2533 if (depth == 0)
2534 return offset;
2535 depth--;
2536 break;
2537 case BRW_OPCODE_WHILE:
2538 /* If the while doesn't jump before our instruction, it's the end
2539 * of a sibling do...while loop. Ignore it.
2540 */
2541 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2542 continue;
2543 /* fallthrough */
2544 case BRW_OPCODE_ELSE:
2545 case BRW_OPCODE_HALT:
2546 if (depth == 0)
2547 return offset;
2548 }
2549 }
2550
2551 return 0;
2552 }
2553
2554 /* There is no DO instruction on gen6 and later, so to find the end of
2555  * the loop we have to see whether the WHILE jumps back before our start
2556  * instruction.
2557 */
2558 static int
2559 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2560 {
2561 const struct gen_device_info *devinfo = p->devinfo;
2562 int offset;
2563 void *store = p->store;
2564
2565 assert(devinfo->gen >= 6);
2566
2567 /* Always start after the instruction (such as a WHILE) we're trying to fix
2568 * up.
2569 */
2570 for (offset = next_offset(devinfo, store, start_offset);
2571 offset < p->next_insn_offset;
2572 offset = next_offset(devinfo, store, offset)) {
2573 brw_inst *insn = store + offset;
2574
2575 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2576 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2577 return offset;
2578 }
2579 }
2580 assert(!"not reached");
2581 return start_offset;
2582 }
2583
2584 /* After program generation, go back and update the UIP and JIP of
2585 * BREAK, CONT, and HALT instructions to their correct locations.
2586 */
2587 void
2588 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2589 {
2590 const struct gen_device_info *devinfo = p->devinfo;
2591 int offset;
2592 int br = brw_jump_scale(devinfo);
2593 int scale = 16 / br;
2594 void *store = p->store;
2595
2596 if (devinfo->gen < 6)
2597 return;
2598
2599 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2600 brw_inst *insn = store + offset;
2601 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2602
2603 int block_end_offset = brw_find_next_block_end(p, offset);
2604 switch (brw_inst_opcode(devinfo, insn)) {
2605 case BRW_OPCODE_BREAK:
2606 assert(block_end_offset != 0);
2607 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2608 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2609 brw_inst_set_uip(devinfo, insn,
2610 (brw_find_loop_end(p, offset) - offset +
2611 (devinfo->gen == 6 ? 16 : 0)) / scale);
2612 break;
2613 case BRW_OPCODE_CONTINUE:
2614 assert(block_end_offset != 0);
2615 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2616 brw_inst_set_uip(devinfo, insn,
2617 (brw_find_loop_end(p, offset) - offset) / scale);
2618
2619 assert(brw_inst_uip(devinfo, insn) != 0);
2620 assert(brw_inst_jip(devinfo, insn) != 0);
2621 break;
2622
2623 case BRW_OPCODE_ENDIF: {
2624 int32_t jump = (block_end_offset == 0) ?
2625 1 * br : (block_end_offset - offset) / scale;
2626 if (devinfo->gen >= 7)
2627 brw_inst_set_jip(devinfo, insn, jump);
2628 else
2629 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2630 break;
2631 }
2632
2633 case BRW_OPCODE_HALT:
2634 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2635 *
2636 * "In case of the halt instruction not inside any conditional
2637 * code block, the value of <JIP> and <UIP> should be the
2638 * same. In case of the halt instruction inside conditional code
2639 * block, the <UIP> should be the end of the program, and the
2640 * <JIP> should be end of the most inner conditional code block."
2641 *
2642 * The uip will have already been set by whoever set up the
2643 * instruction.
2644 */
2645 if (block_end_offset == 0) {
2646 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2647 } else {
2648 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2649 }
2650 assert(brw_inst_uip(devinfo, insn) != 0);
2651 assert(brw_inst_jip(devinfo, insn) != 0);
2652 break;
2653 }
2654 }
2655 }
2656
2657 void brw_ff_sync(struct brw_codegen *p,
2658 struct brw_reg dest,
2659 unsigned msg_reg_nr,
2660 struct brw_reg src0,
2661 bool allocate,
2662 unsigned response_length,
2663 bool eot)
2664 {
2665 const struct gen_device_info *devinfo = p->devinfo;
2666 brw_inst *insn;
2667
2668 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2669
2670 insn = next_insn(p, BRW_OPCODE_SEND);
2671 brw_set_dest(p, insn, dest);
2672 brw_set_src0(p, insn, src0);
2673 brw_set_src1(p, insn, brw_imm_d(0));
2674
2675 if (devinfo->gen < 6)
2676 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2677
2678 brw_set_ff_sync_message(p,
2679 insn,
2680 allocate,
2681 response_length,
2682 eot);
2683 }
2684
2685 /**
2686 * Emit the SEND instruction necessary to generate stream output data on Gen6
2687 * (for transform feedback).
2688 *
2689 * If send_commit_msg is true, this is the last piece of stream output data
2690 * from this thread, so send the data as a committed write. According to the
2691 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2692 *
2693 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2694 * writes are complete by sending the final write as a committed write."
2695 */
2696 void
2697 brw_svb_write(struct brw_codegen *p,
2698 struct brw_reg dest,
2699 unsigned msg_reg_nr,
2700 struct brw_reg src0,
2701 unsigned binding_table_index,
2702 bool send_commit_msg)
2703 {
2704 const struct gen_device_info *devinfo = p->devinfo;
2705 const unsigned target_cache =
2706 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2707 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2708 BRW_SFID_DATAPORT_WRITE);
2709 brw_inst *insn;
2710
2711 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2712
2713 insn = next_insn(p, BRW_OPCODE_SEND);
2714 brw_inst_set_sfid(devinfo, insn, target_cache);
2715 brw_set_dest(p, insn, dest);
2716 brw_set_src0(p, insn, src0);
2717 brw_set_desc(p, insn,
2718 brw_message_desc(devinfo, 1, send_commit_msg, true) |
2719 brw_dp_write_desc(devinfo, binding_table_index,
2720 0, /* msg_control: ignored */
2721 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2722 0, /* last_render_target: ignored */
2723 send_commit_msg)); /* send_commit_msg */
2724 }
2725
2726 static unsigned
2727 brw_surface_payload_size(struct brw_codegen *p,
2728 unsigned num_channels,
2729 bool has_simd4x2,
2730 bool has_simd16)
2731 {
2732 if (has_simd4x2 && brw_get_default_access_mode(p) == BRW_ALIGN_16)
2733 return 1;
2734 else if (has_simd16 && brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2735 return 2 * num_channels;
2736 else
2737 return num_channels;
2738 }
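
/* For example, a SIMD16 access with num_channels = 4 occupies
 * 2 * 4 = 8 payload registers, while the Align16 SIMD4x2 case packs
 * everything into a single register.
 */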
2739
2740 static uint32_t
2741 brw_dp_untyped_atomic_desc(struct brw_codegen *p,
2742 unsigned atomic_op,
2743 bool response_expected)
2744 {
2745 const struct gen_device_info *devinfo = p->devinfo;
2746 unsigned msg_control =
2747 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2748 (response_expected ? 1 << 5 : 0); /* Return data expected */
2749 unsigned msg_type;
2750
2751 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2752 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2753 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2754 msg_control |= 1 << 4; /* SIMD8 mode */
2755
2756 msg_type = HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP;
2757 } else {
2758 msg_type = HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2;
2759 }
2760 } else {
2761 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2762 msg_control |= 1 << 4; /* SIMD8 mode */
2763
2764 msg_type = GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP;
2765 }
2766
2767 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2768 }
2769
2770 void
2771 brw_untyped_atomic(struct brw_codegen *p,
2772 struct brw_reg dst,
2773 struct brw_reg payload,
2774 struct brw_reg surface,
2775 unsigned atomic_op,
2776 unsigned msg_length,
2777 bool response_expected,
2778 bool header_present)
2779 {
2780 const struct gen_device_info *devinfo = p->devinfo;
2781 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2782 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2783 GEN7_SFID_DATAPORT_DATA_CACHE);
2784 const unsigned response_length = brw_surface_payload_size(
2785 p, response_expected, devinfo->gen >= 8 || devinfo->is_haswell, true);
2786 const unsigned desc =
2787 brw_message_desc(devinfo, msg_length, response_length, header_present) |
2788 brw_dp_untyped_atomic_desc(p, atomic_op, response_expected);
2789 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2790 /* Mask out unused components -- This is especially important in Align16
2791 * mode on generations that don't have native support for SIMD4x2 atomics,
2792 * because unused but enabled components will cause the dataport to perform
2793 * additional atomic operations on the addresses that happen to be in the
2794 * uninitialized Y, Z and W coordinates of the payload.
2795 */
2796 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2797
2798 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
2799 payload, surface, desc);
2800 }
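
/* Sketch (hypothetical operands): a SIMD8 atomic add on surface index 2
 * that returns the old value into dst:
 *
 *    brw_untyped_atomic(p, dst, payload, brw_imm_ud(2), BRW_AOP_ADD,
 *                       mlen, true, true);
 */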
2801
2802 static uint32_t
2803 brw_dp_untyped_surface_read_desc(struct brw_codegen *p,
2804 unsigned num_channels)
2805 {
2806 const struct gen_device_info *devinfo = p->devinfo;
2807 const unsigned msg_type = (devinfo->gen >= 8 || devinfo->is_haswell ?
2808 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2809 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ);
2810 /* Set mask of 32-bit channels to drop. */
2811 unsigned msg_control = 0xf & (0xf << num_channels);
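   /* For example, num_channels = 2 gives msg_control = 0xc, dropping the
    * Z and W channels and returning only X and Y.
    */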
2812
2813 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2814 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2815 msg_control |= 1 << 4; /* SIMD16 mode */
2816 else
2817 msg_control |= 2 << 4; /* SIMD8 mode */
2818 }
2819
2820 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2821 }
2822
2823 void
2824 brw_untyped_surface_read(struct brw_codegen *p,
2825 struct brw_reg dst,
2826 struct brw_reg payload,
2827 struct brw_reg surface,
2828 unsigned msg_length,
2829 unsigned num_channels)
2830 {
2831 const struct gen_device_info *devinfo = p->devinfo;
2832 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2833 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2834 GEN7_SFID_DATAPORT_DATA_CACHE);
2835 const unsigned response_length =
2836 brw_surface_payload_size(p, num_channels, true, true);
2837 const unsigned desc =
2838 brw_message_desc(devinfo, msg_length, response_length, false) |
2839 brw_dp_untyped_surface_read_desc(p, num_channels);
2840
2841 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
2842 }
2843
2844 static uint32_t
2845 brw_dp_untyped_surface_write_desc(struct brw_codegen *p,
2846 unsigned num_channels)
2847 {
2848 const struct gen_device_info *devinfo = p->devinfo;
2849 const unsigned msg_type = (devinfo->gen >= 8 || devinfo->is_haswell ?
2850 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
2851 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
2852 /* Set mask of 32-bit channels to drop. */
2853 unsigned msg_control = 0xf & (0xf << num_channels);
2854
2855 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2856 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2857 msg_control |= 1 << 4; /* SIMD16 mode */
2858 else
2859 msg_control |= 2 << 4; /* SIMD8 mode */
2860 } else {
2861 if (devinfo->gen >= 8 || devinfo->is_haswell)
2862 msg_control |= 0 << 4; /* SIMD4x2 mode */
2863 else
2864 msg_control |= 2 << 4; /* SIMD8 mode */
2865 }
2866
2867 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2868 }
2869
2870 void
2871 brw_untyped_surface_write(struct brw_codegen *p,
2872 struct brw_reg payload,
2873 struct brw_reg surface,
2874 unsigned msg_length,
2875 unsigned num_channels,
2876 bool header_present)
2877 {
2878 const struct gen_device_info *devinfo = p->devinfo;
2879 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2880 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2881 GEN7_SFID_DATAPORT_DATA_CACHE);
2882 const unsigned desc =
2883 brw_message_desc(devinfo, msg_length, 0, header_present) |
2884 brw_dp_untyped_surface_write_desc(p, num_channels);
2885 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2886 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2887 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
2888 WRITEMASK_X : WRITEMASK_XYZW;
2889
2890 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
2891 payload, surface, desc);
2892 }
2893
2894 static unsigned
2895 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
2896 {
2897 switch (bit_size) {
2898 case 8:
2899 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
2900 case 16:
2901 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
2902 case 32:
2903 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
2904 default:
2905 unreachable("Unsupported bit_size for byte scattered messages");
2906 }
2907 }
2908
2909 static uint32_t
2910 brw_dp_byte_scattered_desc(struct brw_codegen *p, unsigned bit_size,
2911 unsigned msg_type)
2912 {
2913 const struct gen_device_info *devinfo = p->devinfo;
2914 unsigned msg_control =
2915 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
2916
2917 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2918 msg_control |= 1; /* SIMD16 mode */
2919 else
2920 msg_control |= 0; /* SIMD8 mode */
2921
2922 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2923 }
2924
2925 void
2926 brw_byte_scattered_read(struct brw_codegen *p,
2927 struct brw_reg dst,
2928 struct brw_reg payload,
2929 struct brw_reg surface,
2930 unsigned msg_length,
2931 unsigned bit_size)
2932 {
2933 const struct gen_device_info *devinfo = p->devinfo;
2934 assert(devinfo->gen > 7 || devinfo->is_haswell);
2935 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2936 const unsigned response_length =
2937 brw_surface_payload_size(p, 1, true, true);
2938 const unsigned desc =
2939 brw_message_desc(devinfo, msg_length, response_length, false) |
2940 brw_dp_byte_scattered_desc(p, bit_size,
2941 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
2942
2943 brw_send_indirect_surface_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
2944 dst, payload, surface, desc);
2945 }
2946
2947 void
2948 brw_byte_scattered_write(struct brw_codegen *p,
2949 struct brw_reg payload,
2950 struct brw_reg surface,
2951 unsigned msg_length,
2952 unsigned bit_size,
2953 bool header_present)
2954 {
2955 const struct gen_device_info *devinfo = p->devinfo;
2956 assert(devinfo->gen > 7 || devinfo->is_haswell);
2957 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2958 const unsigned desc =
2959 brw_message_desc(devinfo, msg_length, 0, header_present) |
2960 brw_dp_byte_scattered_desc(p, bit_size,
2961 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
2962
2963 brw_send_indirect_surface_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
2964 brw_writemask(brw_null_reg(),
2965 WRITEMASK_XYZW),
2966 payload, surface, desc);
2967 }
2968
2969 static uint32_t
2970 brw_dp_typed_atomic_desc(struct brw_codegen *p,
2971 unsigned atomic_op,
2972 bool response_expected)
2973 {
2974 const struct gen_device_info *devinfo = p->devinfo;
2975 unsigned msg_control =
2976 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2977 (response_expected ? 1 << 5 : 0); /* Return data expected */
2978 unsigned msg_type;
2979
2980 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2981 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2982 if ((brw_get_default_group(p) / 8) % 2 == 1)
2983 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
2984
2985 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP;
2986 } else {
2987 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2;
2988 }
2989
2990 } else {
2991 if ((brw_get_default_group(p) / 8) % 2 == 1)
2992 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
2993
2994 msg_type = GEN7_DATAPORT_RC_TYPED_ATOMIC_OP;
2995 }
2996
2997 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2998 }
2999
3000 void
3001 brw_typed_atomic(struct brw_codegen *p,
3002 struct brw_reg dst,
3003 struct brw_reg payload,
3004 struct brw_reg surface,
3005 unsigned atomic_op,
3006 unsigned msg_length,
3007 bool response_expected,
3008 bool header_present) {
3009 const struct gen_device_info *devinfo = p->devinfo;
3010 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3011 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3012 GEN6_SFID_DATAPORT_RENDER_CACHE);
3013 const unsigned response_length = brw_surface_payload_size(
3014 p, response_expected, devinfo->gen >= 8 || devinfo->is_haswell, false);
3015 const unsigned desc =
3016 brw_message_desc(devinfo, msg_length, response_length, header_present) |
3017 brw_dp_typed_atomic_desc(p, atomic_op, response_expected);
3018 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3019 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3020 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3021
3022 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
3023 payload, surface, desc);
3024 }
3025
3026 static uint32_t
3027 brw_dp_typed_surface_read_desc(struct brw_codegen *p,
3028 unsigned num_channels)
3029 {
3030 const struct gen_device_info *devinfo = p->devinfo;
3031 /* Set mask of unused channels. */
3032 unsigned msg_control = 0xf & (0xf << num_channels);
3033 unsigned msg_type;
3034
3035 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3036 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3037 if ((brw_get_default_group(p) / 8) % 2 == 1)
3038 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3039 else
3040 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3041 }
3042
3043 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ;
3044 } else {
3045 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3046 if ((brw_get_default_group(p) / 8) % 2 == 1)
3047 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3048 }
3049
3050 msg_type = GEN7_DATAPORT_RC_TYPED_SURFACE_READ;
3051 }
3052
3053 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3054 }
3055
3056 void
3057 brw_typed_surface_read(struct brw_codegen *p,
3058 struct brw_reg dst,
3059 struct brw_reg payload,
3060 struct brw_reg surface,
3061 unsigned msg_length,
3062 unsigned num_channels,
3063 bool header_present)
3064 {
3065 const struct gen_device_info *devinfo = p->devinfo;
3066 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3067 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3068 GEN6_SFID_DATAPORT_RENDER_CACHE);
3069 const unsigned response_length = brw_surface_payload_size(
3070 p, num_channels, devinfo->gen >= 8 || devinfo->is_haswell, false);
3071 const unsigned desc =
3072 brw_message_desc(devinfo, msg_length, response_length, header_present) |
3073 brw_dp_typed_surface_read_desc(p, num_channels);
3074
3075 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
3076 }
3077
3078 static uint32_t
3079 brw_dp_typed_surface_write_desc(struct brw_codegen *p,
3080 unsigned num_channels)
3081 {
3082 const struct gen_device_info *devinfo = p->devinfo;
3083 /* Set mask of unused channels. */
3084 unsigned msg_control = 0xf & (0xf << num_channels);
3085 unsigned msg_type;
3086
3087 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3088 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3089 if ((brw_get_default_group(p) / 8) % 2 == 1)
3090 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3091 else
3092 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3093 }
3094
3095 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE;
3096
3097 } else {
3098 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3099 if ((brw_get_default_group(p) / 8) % 2 == 1)
3100 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3101 }
3102
3103 msg_type = GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE;
3104 }
3105
3106 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3107 }
3108
3109 void
3110 brw_typed_surface_write(struct brw_codegen *p,
3111 struct brw_reg payload,
3112 struct brw_reg surface,
3113 unsigned msg_length,
3114 unsigned num_channels,
3115 bool header_present)
3116 {
3117 const struct gen_device_info *devinfo = p->devinfo;
3118 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3119 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3120 GEN6_SFID_DATAPORT_RENDER_CACHE);
3121 const unsigned desc =
3122 brw_message_desc(devinfo, msg_length, 0, header_present) |
3123 brw_dp_typed_surface_write_desc(p, num_channels);
3124 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3125 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3126 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3127 WRITEMASK_X : WRITEMASK_XYZW);
3128
3129 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
3130 payload, surface, desc);
3131 }
3132
3133 static void
3134 brw_set_memory_fence_message(struct brw_codegen *p,
3135 struct brw_inst *insn,
3136 enum brw_message_target sfid,
3137 bool commit_enable)
3138 {
3139 const struct gen_device_info *devinfo = p->devinfo;
3140
3141 brw_set_desc(p, insn, brw_message_desc(
3142 devinfo, 1, (commit_enable ? 1 : 0), true));
3143
3144 brw_inst_set_sfid(devinfo, insn, sfid);
3145
3146 switch (sfid) {
3147 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3148 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3149 break;
3150 case GEN7_SFID_DATAPORT_DATA_CACHE:
3151 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3152 break;
3153 default:
3154 unreachable("Not reached");
3155 }
3156
3157 if (commit_enable)
3158 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3159 }
3160
3161 void
3162 brw_memory_fence(struct brw_codegen *p,
3163 struct brw_reg dst,
3164 enum opcode send_op)
3165 {
3166 const struct gen_device_info *devinfo = p->devinfo;
3167 const bool commit_enable =
3168 devinfo->gen >= 10 || /* HSD ES # 1404612949 */
3169 (devinfo->gen == 7 && !devinfo->is_haswell);
3170 struct brw_inst *insn;
3171
3172 brw_push_insn_state(p);
3173 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3174 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3175 dst = vec1(dst);
3176
3177    /* Set dst as destination for dependency tracking; the MEMORY_FENCE
3178 * message doesn't write anything back.
3179 */
3180 insn = next_insn(p, send_op);
3181 dst = retype(dst, BRW_REGISTER_TYPE_UW);
3182 brw_set_dest(p, insn, dst);
3183 brw_set_src0(p, insn, dst);
3184 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3185 commit_enable);
3186
3187 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3188 /* IVB does typed surface access through the render cache, so we need to
3189 * flush it too. Use a different register so both flushes can be
3190 * pipelined by the hardware.
3191 */
3192 insn = next_insn(p, send_op);
3193 brw_set_dest(p, insn, offset(dst, 1));
3194 brw_set_src0(p, insn, offset(dst, 1));
3195 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3196 commit_enable);
3197
3198 /* Now write the response of the second message into the response of the
3199 * first to trigger a pipeline stall -- This way future render and data
3200 * cache messages will be properly ordered with respect to past data and
3201 * render cache messages.
3202 */
3203 brw_MOV(p, dst, offset(dst, 1));
3204 }
3205
3206 brw_pop_insn_state(p);
3207 }
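
/* Sketch (hypothetical operands): emit a fence through the data cache,
 * using a scratch GRF for the commit write-back, before dependent reads:
 *
 *    brw_memory_fence(p, brw_vec8_grf(124, 0), BRW_OPCODE_SEND);
 */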
3208
3209 void
3210 brw_pixel_interpolator_query(struct brw_codegen *p,
3211 struct brw_reg dest,
3212 struct brw_reg mrf,
3213 bool noperspective,
3214 unsigned mode,
3215 struct brw_reg data,
3216 unsigned msg_length,
3217 unsigned response_length)
3218 {
3219 const struct gen_device_info *devinfo = p->devinfo;
3220 const uint16_t exec_size = brw_get_default_exec_size(p);
3221 const unsigned slot_group = brw_get_default_group(p) / 16;
3222 const unsigned simd_mode = (exec_size == BRW_EXECUTE_16);
3223 const unsigned desc =
3224 brw_message_desc(devinfo, msg_length, response_length, false) |
3225 brw_pixel_interp_desc(devinfo, mode, noperspective, simd_mode,
3226 slot_group);
3227
3228    /* brw_send_indirect_message will automatically use a plain SEND with an
3229     * immediate descriptor if data is actually an immediate.
3230 */
3231 brw_send_indirect_message(p,
3232 GEN7_SFID_PIXEL_INTERPOLATOR,
3233 dest,
3234 mrf,
3235 vec1(data),
3236 desc);
3237 }
3238
3239 void
3240 brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
3241 struct brw_reg mask)
3242 {
3243 const struct gen_device_info *devinfo = p->devinfo;
3244 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
3245 const unsigned qtr_control = brw_get_default_group(p) / 8;
3246 brw_inst *inst;
3247
3248 assert(devinfo->gen >= 7);
3249 assert(mask.type == BRW_REGISTER_TYPE_UD);
3250
3251 brw_push_insn_state(p);
3252
3253 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3254 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3255
3256 if (devinfo->gen >= 8) {
3257 /* Getting the first active channel index is easy on Gen8: Just find
3258 * the first bit set in the execution mask. The register exists on
3259 * HSW already but it reads back as all ones when the current
3260 * instruction has execution masking disabled, so it's kind of
3261 * useless.
3262 */
3263 struct brw_reg exec_mask =
3264 retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);
3265
3266 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3267 if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
3268 /* Unfortunately, ce0 does not take into account the thread
3269 * dispatch mask, which may be a problem in cases where it's not
3270 * tightly packed (i.e. it doesn't have the form '2^n - 1' for
3271 * some n). Combine ce0 with the given dispatch (or vector) mask
3272 * to mask off those channels which were never dispatched by the
3273 * hardware.
3274 */
3275 brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
3276 brw_AND(p, vec1(dst), exec_mask, vec1(dst));
3277 exec_mask = vec1(dst);
3278 }
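/* Illustrative example: for a SIMD16 dispatch mask of 0xffff and
 * qtr_control == 1 the SHR yields 0xff, which lines up with the ce0
 * bits of the second quarter before the AND.
 */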
3279
3280 /* Quarter control has the effect of magically shifting the value of
3281 * ce0 so you'll get the first active channel relative to the
3282 * specified quarter control as a result.
3283 */
3284 inst = brw_FBL(p, vec1(dst), exec_mask);
3285 } else {
3286 const struct brw_reg flag = brw_flag_reg(p->current->flag_subreg / 2,
3287 p->current->flag_subreg % 2);
3288
3289 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3290 brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
3291
3292 /* Run enough instructions returning zero with execution masking and
3293 * a conditional modifier enabled in order to get the full execution
3294 * mask in the flag register.  We could use a single 32-wide move
3295 * here if it weren't for the hardware bug that causes channel
3296 * enables to be applied incorrectly to the second half of 32-wide
3297 * instructions on Gen7.
3298 */
3299 const unsigned lower_size = MIN2(16, exec_size);
3300 for (unsigned i = 0; i < exec_size / lower_size; i++) {
3301 inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
3302 brw_imm_uw(0));
3303 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
3304 brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
3305 brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
3306 brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
3307 }
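/* Each mov.z above compares 0 == 0, which is true for every channel,
 * but the conditional modifier only updates flag bits for channels that
 * are actually enabled, so the flag register (cleared to zero by the
 * MOV above) ends up holding the execution mask.
 */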
3308
3309 /* Find the first bit set in the exec_size-wide portion of the flag
3310 * register that was updated by the last sequence of MOV
3311 * instructions.
3312 */
3313 const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
3314 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3315 brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
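/* E.g. for exec_size == 8 the flag is read back as a UB and qtr_control
 * (0..3) selects the byte holding that quarter's bits; for exec_size ==
 * 16 it's a UW view offset by 0 or 2 bytes.
 */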
3316 }
3317 } else {
3318 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3319
3320 if (devinfo->gen >= 8 &&
3321 mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
3322 /* In SIMD4x2 mode the first active channel index is just the
3323 * negation of the first bit of the mask register. Note that ce0
3324 * doesn't take into account the dispatch mask, so the Gen7 path
3325 * should be used instead unless you have the guarantee that the
3326 * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
3327 * for some n).
3328 */
3329 inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
3330 negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
3331 brw_imm_ud(1));
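/* I.e. dst.x = (~ce0) & 1: zero if the first slot is live, one
 * otherwise.  (On logic ops the source negate modifier performs a
 * bitwise NOT.)
 */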
3332
3333 } else {
3334 /* Overwrite the destination without and with execution masking to
3335 * find out which of the channels is active.
3336 */
3337 brw_push_insn_state(p);
3338 brw_set_default_exec_size(p, BRW_EXECUTE_4);
3339 brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
3340 brw_imm_ud(1));
3341
3342 inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
3343 brw_imm_ud(0));
3344 brw_pop_insn_state(p);
3345 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
3346 }
3347 }
3348
3349 brw_pop_insn_state(p);
3350 }
3351
3352 void
3353 brw_broadcast(struct brw_codegen *p,
3354 struct brw_reg dst,
3355 struct brw_reg src,
3356 struct brw_reg idx)
3357 {
3358 const struct gen_device_info *devinfo = p->devinfo;
3359 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3360 brw_inst *inst;
3361
3362 brw_push_insn_state(p);
3363 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3364 brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);
3365
3366 assert(src.file == BRW_GENERAL_REGISTER_FILE &&
3367 src.address_mode == BRW_ADDRESS_DIRECT);
3368 assert(!src.abs && !src.negate);
3369 assert(src.type == dst.type);
3370
3371 if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
3372 idx.file == BRW_IMMEDIATE_VALUE) {
3373 /* Trivial: the source is already uniform or the index is a constant.
3374 * We will typically not get here if the optimizer is doing its job, but
3375 * asserting would be mean.
3376 */
3377 const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
3378 brw_MOV(p, dst,
3379 (align1 ? stride(suboffset(src, i), 0, 1, 0) :
3380 stride(suboffset(src, 4 * i), 0, 4, 1)));
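/* E.g. in align1 with an immediate idx of 2 this is just
 *    mov (1) dst src.2<0,1,0>
 * replicating the selected scalar component (illustrative syntax).
 */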
3381 } else {
3382 /* From the Haswell PRM section "Register Region Restrictions":
3383 *
3384 * "The lower bits of the AddressImmediate must not overflow to
3385 * change the register address. The lower 5 bits of Address
3386 * Immediate when added to lower 5 bits of address register gives
3387 * the sub-register offset. The upper bits of Address Immediate
3388 * when added to upper bits of address register gives the register
3389 * address. Any overflow from sub-register offset is dropped."
3390 *
3391 * Fortunately, for broadcast, we never have a sub-register offset so
3392 * this isn't an issue.
3393 */
3394 assert(src.subnr == 0);
3395
3396 if (align1) {
3397 const struct brw_reg addr =
3398 retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
3399 unsigned offset = src.nr * REG_SIZE + src.subnr;
3400 /* Limit in bytes of the signed indirect addressing immediate. */
3401 const unsigned limit = 512;
3402
3403 brw_push_insn_state(p);
3404 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3405 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
3406
3407 /* Take into account the component size and horizontal stride. */
3408 assert(src.vstride == src.hstride + src.width);
3409 brw_SHL(p, addr, vec1(idx),
3410 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
3411 src.hstride - 1));
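/* E.g. for a 4-byte type with unit stride (hstride encoding 1) the
 * shift amount is log2(4) + 1 - 1 == 2, so addr = idx * 4 bytes.
 */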
3412
3413 /* We can only address up to limit bytes using the indirect
3414 * addressing immediate, so account for the difference if the source
3415 * register is above this limit.
3416 */
3417 if (offset >= limit) {
3418 brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
3419 offset = offset % limit;
3420 }
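/* Illustrative example: REG_SIZE is 32B, so a source at r70 gives
 * offset == 2240; the ADD folds 2048 into the address register and
 * leaves 192 to be applied as the immediate.
 */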
3421
3422 brw_pop_insn_state(p);
3423
3424 /* Use indirect addressing to fetch the specified component. */
3425 if (type_sz(src.type) > 4 &&
3426 (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
3427 /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
3428 *
3429 * "When source or destination datatype is 64b or operation is
3430 * integer DWord multiply, indirect addressing must not be
3431 * used."
3432 *
3433 * To work around this restriction we do two integer MOVs
3434 * instead of one 64-bit MOV.  Because no double value should ever
3435 * cross a register boundary, it's safe to use the immediate
3436 * offset in the indirect here to handle adding 4 bytes to the
3437 * offset and avoid the extra ADD to the register file.
3438 */
3439 brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
3440 retype(brw_vec1_indirect(addr.subnr, offset),
3441 BRW_REGISTER_TYPE_D));
3442 brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
3443 retype(brw_vec1_indirect(addr.subnr, offset + 4),
3444 BRW_REGISTER_TYPE_D));
3445 } else {
3446 brw_MOV(p, dst,
3447 retype(brw_vec1_indirect(addr.subnr, offset), src.type));
3448 }
3449 } else {
3450 /* In SIMD4x2 mode the index can be either zero or one; replicate it
3451 * to all bits of a flag register,
3452 */
3453 inst = brw_MOV(p,
3454 brw_null_reg(),
3455 stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
3456 brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
3457 brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
3458 brw_inst_set_flag_reg_nr(devinfo, inst, 1);
3459
3460 /* and use predicated SEL to pick the right channel. */
3461 inst = brw_SEL(p, dst,
3462 stride(suboffset(src, 4), 4, 4, 1),
3463 stride(src, 4, 4, 1));
3464 brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
3465 brw_inst_set_flag_reg_nr(devinfo, inst, 1);
3466 }
3467 }
3468
3469 brw_pop_insn_state(p);
3470 }
3471
3472 /**
3473 * This instruction is generated as a single-channel align1 instruction by
3474 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
3475 *
3476 * We can't use the typed atomic op in the FS because that has the execution
3477 * mask ANDed with the pixel mask, but we just want to write the one dword for
3478 * all the pixels.
3479 *
3480 * We don't use the SIMD4x2 atomic ops in the VS because we just want to
3481 * write one u32.  So we use the same untyped atomic write message as the
3482 * pixel shader.
3483 *
3484 * The untyped atomic operation requires a BUFFER surface type with RAW
3485 * format, and is only accessible through the legacy DATA_CACHE dataport
3486 * messages.
3487 */
3488 void brw_shader_time_add(struct brw_codegen *p,
3489 struct brw_reg payload,
3490 uint32_t surf_index)
3491 {
3492 const struct gen_device_info *devinfo = p->devinfo;
3493 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3494 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3495 GEN7_SFID_DATAPORT_DATA_CACHE);
3496 assert(devinfo->gen >= 7);
3497
3498 brw_push_insn_state(p);
3499 brw_set_default_access_mode(p, BRW_ALIGN_1);
3500 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3501 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
3502 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
3503
3504 /* We use brw_vec1_reg and unmasked because we want to increment the given
3505 * offset only once.
3506 */
3507 brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
3508 BRW_ARF_NULL, 0));
3509 brw_set_src0(p, send, brw_vec1_reg(payload.file,
3510 payload.nr, 0));
3511 brw_set_desc(p, send, (brw_message_desc(devinfo, 2, 0, false) |
3512 brw_dp_untyped_atomic_desc(p, BRW_AOP_ADD, false)));
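/* The descriptor encodes a header-less message of 2 GRFs with no
 * response; the atomic ADD's return value is unused, hence the null
 * destination above.
 */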
3513
3514 brw_inst_set_sfid(devinfo, send, sfid);
3515 brw_inst_set_binding_table_index(devinfo, send, surf_index);
3516
3517 brw_pop_insn_state(p);
3518 }
3519
3520
3521 /**
3522 * Emit the SEND message for a barrier
3523 */
3524 void
3525 brw_barrier(struct brw_codegen *p, struct brw_reg src)
3526 {
3527 const struct gen_device_info *devinfo = p->devinfo;
3528 struct brw_inst *inst;
3529
3530 assert(devinfo->gen >= 7);
3531
3532 brw_push_insn_state(p);
3533 brw_set_default_access_mode(p, BRW_ALIGN_1);
3534 inst = next_insn(p, BRW_OPCODE_SEND);
3535 brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
3536 brw_set_src0(p, inst, src);
3537 brw_set_src1(p, inst, brw_null_reg());
3538 brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));
3539
3540 brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
3541 brw_inst_set_gateway_notify(devinfo, inst, 1);
3542 brw_inst_set_gateway_subfuncid(devinfo, inst,
3543 BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);
3544
3545 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
3546 brw_pop_insn_state(p);
3547 }
3548
3549
3550 /**
3551 * Emit the wait instruction for a barrier
3552 */
3553 void
3554 brw_WAIT(struct brw_codegen *p)
3555 {
3556 const struct gen_device_info *devinfo = p->devinfo;
3557 struct brw_inst *insn;
3558
3559 struct brw_reg src = brw_notification_reg();
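/* WAIT stalls the thread until the notification counter in n0 becomes
 * non-zero and then decrements it (a brief summary of the semantics,
 * not PRM wording).
 */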
3560
3561 insn = next_insn(p, BRW_OPCODE_WAIT);
3562 brw_set_dest(p, insn, src);
3563 brw_set_src0(p, insn, src);
3564 brw_set_src1(p, insn, brw_null_reg());
3565
3566 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
3567 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
3568 }
3569
3570 /**
3571 * Changes the floating point rounding mode by updating the control register
3572 * field defined at cr0.0[5-6] bits.  This function supports switching to
3573 * RTNE (00), RU (01), RD (10) and RTZ (11) rounding using bitwise operations.
3574 * Only RTNE and RTZ rounding are enabled in NIR.
3575 */
3576 void
3577 brw_rounding_mode(struct brw_codegen *p,
3578 enum brw_rnd_mode mode)
3579 {
3580 const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;
3581
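/* Only the strictly necessary ALU ops are emitted below: e.g. switching
 * to RTNE (00) needs just the AND to clear both mode bits, while RTZ
 * (11) needs just the OR, since setting both bits gives the right mode
 * regardless of the previous value.
 */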
3582 if (bits != BRW_CR0_RND_MODE_MASK) {
3583 brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
3584 brw_imm_ud(~BRW_CR0_RND_MODE_MASK));
3585 brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
3586
3587 /* From the Skylake PRM, Volume 7, page 760:
3588 * "Implementation Restriction on Register Access: When the control
3589 * register is used as an explicit source and/or destination, hardware
3590 * does not ensure execution pipeline coherency. Software must set the
3591 * thread control field to ‘switch’ for an instruction that uses
3592 * control register as an explicit operand."
3593 */
3594 brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
3595 }
3596
3597 if (bits) {
3598 brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
3599 brw_imm_ud(bits));
3600 brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
3601 brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
3602 }
3603 }