intel/eu: Provide single descriptor argument to brw_send_indirect_surface_message().
[mesa.git] src/intel/compiler/brw_eu_emit.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
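
/* A minimal usage sketch (hypothetical register numbers): if a SEND payload
 * currently lives in g5 and message register m2 is free, a generator can do
 *
 *    gen6_resolve_implied_move(p, &src, 2);
 *
 * On gen6+ this emits roughly "mov(8) m2<1>:UD g5<8,8,1>:UD { WE_all }" and
 * rewrites src to m2, so the subsequent SEND reads its payload from m2; on
 * earlier gens the hardware does the move implicitly and this is a no-op.
 */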

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
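
/* A worked example of the remapping above: with GEN7_MRF_HACK_START defined
 * as 112, the fake MRFs m0-m15 land in the EOT-friendly r112-r127 range, so
 * a register written as m4 becomes r116 on gen7+.
 */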

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          * Although Dst.HorzStride is a don't care for Align16, HW needs
          * this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16: */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Even though the hstride is ignored in DA16 mode, hardware still
          * requires it to be programmed as '01'.
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * On platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16.  In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}
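
/* Example of the automatic narrowing above: with p->automatic_exec_sizes set,
 * a MOV whose destination has width 2 on gen6+ gets its exec size reduced
 * from the generator default (8 or 16) down to 2, since BRW_WIDTH_2 encodes
 * below BRW_EXECUTE_4.
 */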

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity arising from the fact that we use the same
             * register descriptions in align_16 as in align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From the SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be immediate in two-argument instructions. */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity arising from the fact that we use the same
             * register descriptions in align_16 as in align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From the SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Specify the descriptor and extended descriptor immediate for a SEND(C)
 * message instruction.
 */
void
brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
                unsigned desc, unsigned ex_desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst_set_src1_file_type(devinfo, inst,
                               BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_D);
   brw_inst_set_send_desc(devinfo, inst, desc);
   if (devinfo->gen >= 9 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC))
      brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
}
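
/* Callers below that don't need an extended descriptor go through
 * brw_set_desc(), which (presumably a thin inline wrapper in brw_eu.h in this
 * tree) forwards here with an ex_desc of 0; e.g.
 * brw_set_desc(p, inst, brw_message_desc(devinfo, mlen, rlen, header))
 * packs the message and response lengths into the descriptor immediate.
 */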

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}
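
/* For instance, BRW_MATH_FUNCTION_POW is a two-operand function, so its
 * payload is two registers (msg_length 2) with a single result register
 * (response_length 1), while INT_DIV_QUOTIENT_AND_REMAINDER both takes two
 * operands and writes two results (msg_length 2, response_length 2).
 */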


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, mlen, rlen, header_present));

   brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
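
/* Note the block-size encoding difference above: for num_regs == 4, gen8+
 * encodes log2(4) == 2 while gen7 encodes num_regs - 1 == 3.  Both mean a
 * four-register scratch block; only the bit pattern differs.
 */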

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
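
/* For example, a byte subreg_nr of 12 becomes component 12 / 4 == 3, i.e.
 * the fourth 32-bit element of the register.
 */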

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.nr < 128);
   assert(src1.nr < 128);
   assert(src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD);
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types.  The MAD and LRP emitters ensure
          * that all four types are float.  The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0)                     \
{                                                           \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);         \
}

#define ALU2(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0,                     \
                   struct brw_reg src1)                     \
{                                                           \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);   \
}

#define ALU3(OP)                                                  \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

#define ALU3F(OP)                                                 \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                     \
          dest.type == BRW_REGISTER_TYPE_DF);                     \
   if (dest.type == BRW_REGISTER_TYPE_F) {                        \
      assert(src0.type == BRW_REGISTER_TYPE_F);                   \
      assert(src1.type == BRW_REGISTER_TYPE_F);                   \
      assert(src2.type == BRW_REGISTER_TYPE_F);                   \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {                \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                  \
   }                                                              \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                       \
void brw_##OP(struct brw_codegen *p,                                    \
              struct brw_reg dest,                                     \
              struct brw_reg src)                                      \
{                                                                      \
   const struct gen_device_info *devinfo = p->devinfo;                 \
   brw_inst *rnd, *add;                                                \
   rnd = next_insn(p, BRW_OPCODE_##OP);                                \
   brw_set_dest(p, rnd, dest);                                         \
   brw_set_src0(p, rnd, src);                                          \
                                                                       \
   if (devinfo->gen < 6) {                                             \
      /* turn on round-increments */                                   \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R);     \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));                   \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL);   \
   }                                                                   \
}
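
/* As a concrete expansion of the pattern above, brw_RNDZ(p, dest, src) on
 * gen4/gen5 emits roughly:
 *
 *    rndz.r.f0.0  dest  src           // round; set per-channel increment bits
 *    (+f0.0) add  dest  dest  1.0F    // predicated fix-up of mis-rounded channels
 *
 * while on gen6+ only the first instruction is emitted.
 */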

ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
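
/* To illustrate the region change above: the asserted <4,4,1> source region
 * reads elements 0,1,2,3 in each row, while the <1,2,0> replacement reads
 * 0,0,1,1,2,2,3,3 across eight channels, so the even channels (the only ones
 * the IVB/BYT F->DF conversion actually uses) still see each element exactly
 * once.
 */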

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}
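
/* The zero-fill path above writes the converted halves to the even words of
 * the UD destination (the spread-by-2 W region) and then stores a 0 immediate
 * to the odd words; the NoDDClr/NoDDChk pair tells the scoreboard that the
 * two strided writes to the same register are intentionally interleaved.
 */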

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   "Because this instruction does not have a 16-bit floating-point
       *    type, the source data type must be Word (W).  The destination type
       *    must be F (Float)."
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}


/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* The EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, e.g. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction: */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}
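
/* A typical generator usage sketch (the predicate is assumed to have been
 * set up in a flag register beforehand):
 *
 *    brw_IF(p, BRW_EXECUTE_8);
 *    ...emit the "then" instructions...
 *    brw_ELSE(p);
 *    ...emit the "else" instructions...
 *    brw_ENDIF(p);
 *
 * brw_ENDIF() pops the if-stack and patches the IF/ELSE jump targets via
 * patch_IF_ELSE() below.
 */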

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}
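
/* Worked example of the distances above: with 16-byte instructions, if the
 * IF sits at index 0, the ELSE at index 3, and the instruction after the
 * (omitted) ENDIF at index 6, the IF roughly becomes a predicate-inverted
 * "add ip, ip, 4*16" (skip to index 4, just past the ELSE) and the ELSE
 * becomes "add ip, ip, 3*16" (skip from index 3 to index 6).
 */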

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP.  On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /* A single next_insn() may change the base address of the instruction
    * store memory (p->store), so call it first, before taking pointers into
    * the instruction store via an index.
    */
   if (emit_endif)
      insn = next_insn(p, BRW_OPCODE_ENDIF);

   /* Pop the IF and (optional) ELSE instructions from the stack */
   p->if_depth_in_loop[p->loop_stack_depth]--;
   tmp = pop_if_stack(p);
   if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
      else_inst = tmp;
      tmp = pop_if_stack(p);
   }
   if_inst = tmp;

   if (!emit_endif) {
      /* ENDIF is useless; don't bother emitting it. */
      convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
      return;
   }

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
   } else {
      brw_set_src0(p, insn, brw_imm_d(0));
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   /* Also pop item off the stack in the endif instruction: */
   if (devinfo->gen < 6) {
      brw_inst_set_gen4_jump_count(devinfo, insn, 0);
      brw_inst_set_gen4_pop_count(devinfo, insn, 1);
   } else if (devinfo->gen == 6) {
      brw_inst_set_gen6_jump_count(devinfo, insn, 2);
   } else {
      brw_inst_set_jip(devinfo, insn, 2);
   }
   patch_IF_ELSE(p, if_inst, else_inst, insn);
}

brw_inst *
brw_BREAK(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_BREAK);
   if (devinfo->gen >= 8) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen >= 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));

   return insn;
}

brw_inst *
brw_CONT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_CONTINUE);
   brw_set_dest(p, insn, brw_ip_reg());
   if (devinfo->gen >= 8) {
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   }

   if (devinfo->gen < 6) {
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   return insn;
}
1585
1586 brw_inst *
1587 gen6_HALT(struct brw_codegen *p)
1588 {
1589 const struct gen_device_info *devinfo = p->devinfo;
1590 brw_inst *insn;
1591
1592 insn = next_insn(p, BRW_OPCODE_HALT);
1593 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1594 if (devinfo->gen >= 8) {
1595 brw_set_src0(p, insn, brw_imm_d(0x0));
1596 } else {
1597 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1598 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1599 }
1600
1601 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1602 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1603 return insn;
1604 }
1605
1606 /* DO/WHILE loop:
1607 *
1608 * The DO/WHILE is just an unterminated loop -- break or continue are
1609 * used for control within the loop. We have a few ways they can be
1610 * done.
1611 *
1612 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1613 * jip and no DO instruction.
1614 *
1615 * For non-uniform control flow pre-gen6, there's a DO instruction to
1616 * push the mask, and a WHILE to jump back, and BREAK to get out and
1617 * pop the mask.
1618 *
1619 * For gen6, there's no more mask stack, so no need for DO. WHILE
1620 * just points back to the first instruction of the loop.
1621 */
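/* A minimal sketch of the expected emission order (names hypothetical):
 *
 *    brw_inst *do_insn = brw_DO(p, BRW_EXECUTE_8);
 *    ... loop body, possibly containing brw_BREAK(p) / brw_CONT(p) ...
 *    brw_WHILE(p);   // patches BREAK/CONT jump targets on pre-gen6
 */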
1622 brw_inst *
1623 brw_DO(struct brw_codegen *p, unsigned execute_size)
1624 {
1625 const struct gen_device_info *devinfo = p->devinfo;
1626
1627 if (devinfo->gen >= 6 || p->single_program_flow) {
1628 push_loop_stack(p, &p->store[p->nr_insn]);
1629 return &p->store[p->nr_insn];
1630 } else {
1631 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1632
1633 push_loop_stack(p, insn);
1634
1635 /* Override the defaults for this instruction:
1636 */
1637 brw_set_dest(p, insn, brw_null_reg());
1638 brw_set_src0(p, insn, brw_null_reg());
1639 brw_set_src1(p, insn, brw_null_reg());
1640
1641 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1642 brw_inst_set_exec_size(devinfo, insn, execute_size);
1643 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1644
1645 return insn;
1646 }
1647 }
1648
1649 /**
1650 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1651 * instruction here.
1652 *
1653 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1654 * nesting, since it can always just point to the end of the block/current loop.
1655 */
1656 static void
1657 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1658 {
1659 const struct gen_device_info *devinfo = p->devinfo;
1660 brw_inst *do_inst = get_inner_do_insn(p);
1661 brw_inst *inst;
1662 unsigned br = brw_jump_scale(devinfo);
1663
1664 assert(devinfo->gen < 6);
1665
1666 for (inst = while_inst - 1; inst != do_inst; inst--) {
1667       /* A nonzero jump count means this instruction has already been
1668        * patched, because it's part of a loop nested inside the one we're
1669        * patching.
1670        */
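      /* BREAK jumps just past the WHILE (hence the +1 below), while CONTINUE
       * jumps to the WHILE itself so the loop condition re-executes.
       */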
1671 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1672 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1673 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1674 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1675 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1676 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1677 }
1678 }
1679 }
1680
1681 brw_inst *
1682 brw_WHILE(struct brw_codegen *p)
1683 {
1684 const struct gen_device_info *devinfo = p->devinfo;
1685 brw_inst *insn, *do_insn;
1686 unsigned br = brw_jump_scale(devinfo);
1687
1688 if (devinfo->gen >= 6) {
1689 insn = next_insn(p, BRW_OPCODE_WHILE);
1690 do_insn = get_inner_do_insn(p);
1691
1692 if (devinfo->gen >= 8) {
1693 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1694 brw_set_src0(p, insn, brw_imm_d(0));
1695 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1696 } else if (devinfo->gen == 7) {
1697 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1698 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1699 brw_set_src1(p, insn, brw_imm_w(0));
1700 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1701 } else {
1702 brw_set_dest(p, insn, brw_imm_w(0));
1703 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1704 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1705 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1706 }
1707
1708 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1709
1710 } else {
1711 if (p->single_program_flow) {
1712 insn = next_insn(p, BRW_OPCODE_ADD);
1713 do_insn = get_inner_do_insn(p);
1714
1715 brw_set_dest(p, insn, brw_ip_reg());
1716 brw_set_src0(p, insn, brw_ip_reg());
1717 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1718 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1719 } else {
1720 insn = next_insn(p, BRW_OPCODE_WHILE);
1721 do_insn = get_inner_do_insn(p);
1722
1723 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1724
1725 brw_set_dest(p, insn, brw_ip_reg());
1726 brw_set_src0(p, insn, brw_ip_reg());
1727 brw_set_src1(p, insn, brw_imm_d(0));
1728
1729 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1730 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1731 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1732
1733 brw_patch_break_cont(p, insn);
1734 }
1735 }
1736 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1737
1738 p->loop_stack_depth--;
1739
1740 return insn;
1741 }
1742
1743 /* FORWARD JUMPS:
1744 */
1745 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1746 {
1747 const struct gen_device_info *devinfo = p->devinfo;
1748 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1749 unsigned jmpi = 1;
1750
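   /* Gen5+ counts JMPI distances in 64-bit chunks, so a full 128-bit
    * instruction counts as two units; Gen4 counts whole instructions.
    */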
1751 if (devinfo->gen >= 5)
1752 jmpi = 2;
1753
1754 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1755 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1756
1757 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1758 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1759 }
1760
1761 /* To integrate with the above, it makes sense that the comparison
1762 * instruction should populate the flag register. It might be simpler
1763 * just to use the flag reg for most WM tasks?
1764 */
1765 void brw_CMP(struct brw_codegen *p,
1766 struct brw_reg dest,
1767 unsigned conditional,
1768 struct brw_reg src0,
1769 struct brw_reg src1)
1770 {
1771 const struct gen_device_info *devinfo = p->devinfo;
1772 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1773
1774 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1775 brw_set_dest(p, insn, dest);
1776 brw_set_src0(p, insn, src0);
1777 brw_set_src1(p, insn, src1);
1778
1779 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1780 * page says:
1781 * "Any CMP instruction with a null destination must use a {switch}."
1782 *
1783 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1784 * mentioned on their work-arounds pages.
1785 */
1786 if (devinfo->gen == 7) {
1787 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1788 dest.nr == BRW_ARF_NULL) {
1789 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1790 }
1791 }
1792 }
1793
1794 /***********************************************************************
1795 * Helpers for the various SEND message types:
1796 */
1797
1798 /** Extended math function, float[8].
1799 */
1800 void gen4_math(struct brw_codegen *p,
1801 struct brw_reg dest,
1802 unsigned function,
1803 unsigned msg_reg_nr,
1804 struct brw_reg src,
1805 unsigned precision )
1806 {
1807 const struct gen_device_info *devinfo = p->devinfo;
1808 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1809 unsigned data_type;
1810 if (has_scalar_region(src)) {
1811 data_type = BRW_MATH_DATA_SCALAR;
1812 } else {
1813 data_type = BRW_MATH_DATA_VECTOR;
1814 }
1815
1816 assert(devinfo->gen < 6);
1817
1818 /* Example code doesn't set predicate_control for send
1819 * instructions.
1820 */
1821 brw_inst_set_pred_control(devinfo, insn, 0);
1822 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1823
1824 brw_set_dest(p, insn, dest);
1825 brw_set_src0(p, insn, src);
1826 brw_set_math_message(p,
1827 insn,
1828 function,
1829 src.type == BRW_REGISTER_TYPE_D,
1830 precision,
1831 data_type);
1832 }
1833
1834 void gen6_math(struct brw_codegen *p,
1835 struct brw_reg dest,
1836 unsigned function,
1837 struct brw_reg src0,
1838 struct brw_reg src1)
1839 {
1840 const struct gen_device_info *devinfo = p->devinfo;
1841 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1842
1843 assert(devinfo->gen >= 6);
1844
1845 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1846 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1847
1848 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1849 if (devinfo->gen == 6) {
1850 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1851 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1852 }
1853
1854 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1855 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1856 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1857 assert(src0.type != BRW_REGISTER_TYPE_F);
1858 assert(src1.type != BRW_REGISTER_TYPE_F);
1859 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1860 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1861 } else {
1862 assert(src0.type == BRW_REGISTER_TYPE_F);
1863 assert(src1.type == BRW_REGISTER_TYPE_F);
1864 }
1865
1866 /* Source modifiers are ignored for extended math instructions on Gen6. */
1867 if (devinfo->gen == 6) {
1868 assert(!src0.negate);
1869 assert(!src0.abs);
1870 assert(!src1.negate);
1871 assert(!src1.abs);
1872 }
1873
1874 brw_inst_set_math_function(devinfo, insn, function);
1875
1876 brw_set_dest(p, insn, dest);
1877 brw_set_src0(p, insn, src0);
1878 brw_set_src1(p, insn, src1);
1879 }
1880
1881 /**
1882 * Return the right surface index to access the thread scratch space using
1883 * stateless dataport messages.
1884 */
1885 unsigned
1886 brw_scratch_surface_idx(const struct brw_codegen *p)
1887 {
1888 /* The scratch space is thread-local so IA coherency is unnecessary. */
1889 if (p->devinfo->gen >= 8)
1890 return GEN8_BTI_STATELESS_NON_COHERENT;
1891 else
1892 return BRW_BTI_STATELESS;
1893 }
1894
1895 /**
1896  * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1897 * using a constant offset per channel.
1898 *
1899 * The offset must be aligned to oword size (16 bytes). Used for
1900 * register spilling.
1901 */
1902 void brw_oword_block_write_scratch(struct brw_codegen *p,
1903 struct brw_reg mrf,
1904 int num_regs,
1905 unsigned offset)
1906 {
1907 const struct gen_device_info *devinfo = p->devinfo;
1908 const unsigned target_cache =
1909 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1910 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1911 BRW_SFID_DATAPORT_WRITE);
1912 uint32_t msg_type;
1913
1914 if (devinfo->gen >= 6)
1915 offset /= 16;
1916
1917 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1918
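   /* One register of message header plus num_regs of payload data. */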
1919 const unsigned mlen = 1 + num_regs;
1920
1921 /* Set up the message header. This is g0, with g0.2 filled with
1922 * the offset. We don't want to leave our offset around in g0 or
1923 * it'll screw up texture samples, so set it up inside the message
1924 * reg.
1925 */
1926 {
1927 brw_push_insn_state(p);
1928 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1929 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1930 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1931
1932 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1933
1934 /* set message header global offset field (reg 0, element 2) */
1935 brw_set_default_exec_size(p, BRW_EXECUTE_1);
1936 brw_MOV(p,
1937 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
1938 mrf.nr,
1939 2), BRW_REGISTER_TYPE_UD),
1940 brw_imm_ud(offset));
1941
1942 brw_pop_insn_state(p);
1943 }
1944
1945 {
1946 struct brw_reg dest;
1947 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1948 int send_commit_msg;
1949 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
1950 BRW_REGISTER_TYPE_UW);
1951
1952 brw_inst_set_sfid(devinfo, insn, target_cache);
1953 brw_inst_set_compression(devinfo, insn, false);
1954
1955 if (brw_inst_exec_size(devinfo, insn) >= 16)
1956 src_header = vec16(src_header);
1957
1958 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1959 if (devinfo->gen < 6)
1960 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
1961
1962 /* Until gen6, writes followed by reads from the same location
1963 * are not guaranteed to be ordered unless write_commit is set.
1964 * If set, then a no-op write is issued to the destination
1965 * register to set a dependency, and a read from the destination
1966 * can be used to ensure the ordering.
1967 *
1968 * For gen6, only writes between different threads need ordering
1969 * protection. Our use of DP writes is all about register
1970 * spilling within a thread.
1971 */
1972 if (devinfo->gen >= 6) {
1973 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
1974 send_commit_msg = 0;
1975 } else {
1976 dest = src_header;
1977 send_commit_msg = 1;
1978 }
1979
1980 brw_set_dest(p, insn, dest);
1981 if (devinfo->gen >= 6) {
1982 brw_set_src0(p, insn, mrf);
1983 } else {
1984 brw_set_src0(p, insn, brw_null_reg());
1985 }
1986
1987 if (devinfo->gen >= 6)
1988 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
1989 else
1990 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
1991
1992 brw_set_desc(p, insn,
1993 brw_message_desc(devinfo, mlen, send_commit_msg, true) |
1994 brw_dp_write_desc(devinfo, brw_scratch_surface_idx(p),
1995 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
1996 msg_type, 0, /* not a render target */
1997 send_commit_msg));
1998 }
1999 }
2000
2001
2002 /**
2003 * Read a block of owords (half a GRF each) from the scratch buffer
2004 * using a constant index per channel.
2005 *
2006 * Offset must be aligned to oword size (16 bytes). Used for register
2007 * spilling.
2008 */
2009 void
2010 brw_oword_block_read_scratch(struct brw_codegen *p,
2011 struct brw_reg dest,
2012 struct brw_reg mrf,
2013 int num_regs,
2014 unsigned offset)
2015 {
2016 const struct gen_device_info *devinfo = p->devinfo;
2017
2018 if (devinfo->gen >= 6)
2019 offset /= 16;
2020
2021 if (p->devinfo->gen >= 7) {
2022       /* On gen7 and above, we no longer have message registers and we can
2023 * send from any register we want. By using the destination register
2024 * for the message, we guarantee that the implied message write won't
2025 * accidentally overwrite anything. This has been a problem because
2026 * the MRF registers and source for the final FB write are both fixed
2027 * and may overlap.
2028 */
2029 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2030 } else {
2031 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2032 }
2033 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2034
2035 const unsigned rlen = num_regs;
2036 const unsigned target_cache =
2037 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2038 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2039 BRW_SFID_DATAPORT_READ);
2040
2041 {
2042 brw_push_insn_state(p);
2043 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2044 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2045 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2046
2047 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2048
2049 /* set message header global offset field (reg 0, element 2) */
2050 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2051 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2052
2053 brw_pop_insn_state(p);
2054 }
2055
2056 {
2057 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2058
2059 brw_inst_set_sfid(devinfo, insn, target_cache);
2060 assert(brw_inst_pred_control(devinfo, insn) == 0);
2061 brw_inst_set_compression(devinfo, insn, false);
2062
2063 brw_set_dest(p, insn, dest); /* UW? */
2064 if (devinfo->gen >= 6) {
2065 brw_set_src0(p, insn, mrf);
2066 } else {
2067 brw_set_src0(p, insn, brw_null_reg());
2068 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2069 }
2070
2071 brw_set_desc(p, insn,
2072 brw_message_desc(devinfo, 1, rlen, true) |
2073 brw_dp_read_desc(devinfo, brw_scratch_surface_idx(p),
2074 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2075 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2076 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2077 }
2078 }
2079
2080 void
2081 gen7_block_read_scratch(struct brw_codegen *p,
2082 struct brw_reg dest,
2083 int num_regs,
2084 unsigned offset)
2085 {
2086 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2087 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2088
2089 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2090
2091 /* The HW requires that the header is present; this is to get the g0.5
2092 * scratch offset.
2093 */
2094 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2095
2096 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2097 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2098 * is 32 bytes, which happens to be the size of a register.
2099 */
2100 offset /= REG_SIZE;
2101 assert(offset < (1 << 12));
2102
2103 gen7_set_dp_scratch_message(p, insn,
2104 false, /* scratch read */
2105 false, /* OWords */
2106 false, /* invalidate after read */
2107 num_regs,
2108 offset,
2109 1, /* mlen: just g0 */
2110 num_regs, /* rlen */
2111 true); /* header present */
2112 }
2113
2114 /**
2115 * Read float[4] vectors from the data port constant cache.
2116 * Location (in buffer) should be a multiple of 16.
2117 * Used for fetching shader constants.
2118 */
2119 void brw_oword_block_read(struct brw_codegen *p,
2120 struct brw_reg dest,
2121 struct brw_reg mrf,
2122 uint32_t offset,
2123 uint32_t bind_table_index)
2124 {
2125 const struct gen_device_info *devinfo = p->devinfo;
2126 const unsigned target_cache =
2127 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2128 BRW_SFID_DATAPORT_READ);
2129 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2130
2131    /* On gen6+, the message takes the offset in owords (16 bytes) rather than bytes. */
2132 if (devinfo->gen >= 6)
2133 offset /= 16;
2134
2135 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2136
2137 brw_push_insn_state(p);
2138 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2139 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2140 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2141
2142 brw_push_insn_state(p);
2143 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2144 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2145
2146 /* set message header global offset field (reg 0, element 2) */
2147 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2148 brw_MOV(p,
2149 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2150 mrf.nr,
2151 2), BRW_REGISTER_TYPE_UD),
2152 brw_imm_ud(offset));
2153 brw_pop_insn_state(p);
2154
2155 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2156
2157 brw_inst_set_sfid(devinfo, insn, target_cache);
2158
2159 /* cast dest to a uword[8] vector */
2160 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2161
2162 brw_set_dest(p, insn, dest);
2163 if (devinfo->gen >= 6) {
2164 brw_set_src0(p, insn, mrf);
2165 } else {
2166 brw_set_src0(p, insn, brw_null_reg());
2167 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2168 }
2169
2170 brw_set_desc(p, insn,
2171 brw_message_desc(devinfo, 1, DIV_ROUND_UP(exec_size, 8), true) |
2172 brw_dp_read_desc(devinfo, bind_table_index,
2173 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2174 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2175 BRW_DATAPORT_READ_TARGET_DATA_CACHE));
2176
2177 brw_pop_insn_state(p);
2178 }
2179
2180 brw_inst *
2181 brw_fb_WRITE(struct brw_codegen *p,
2182 struct brw_reg payload,
2183 struct brw_reg implied_header,
2184 unsigned msg_control,
2185 unsigned binding_table_index,
2186 unsigned msg_length,
2187 unsigned response_length,
2188 bool eot,
2189 bool last_render_target,
2190 bool header_present)
2191 {
2192 const struct gen_device_info *devinfo = p->devinfo;
2193 const unsigned target_cache =
2194 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2195 BRW_SFID_DATAPORT_WRITE);
2196 brw_inst *insn;
2197 unsigned msg_type;
2198 struct brw_reg dest, src0;
2199
2200 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2201 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2202 else
2203 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2204
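   /* Gen6+ uses SENDC, which stalls the message until any dependency on an
    * earlier thread rendering to the same pixels has cleared.
    */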
2205 if (devinfo->gen >= 6) {
2206 insn = next_insn(p, BRW_OPCODE_SENDC);
2207 } else {
2208 insn = next_insn(p, BRW_OPCODE_SEND);
2209 }
2210 brw_inst_set_sfid(devinfo, insn, target_cache);
2211 brw_inst_set_compression(devinfo, insn, false);
2212
2213 if (devinfo->gen >= 6) {
2214 /* headerless version, just submit color payload */
2215 src0 = payload;
2216
2217 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2218 } else {
2219 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2220 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2221 src0 = implied_header;
2222
2223 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2224 }
2225
2226 brw_set_dest(p, insn, dest);
2227 brw_set_src0(p, insn, src0);
2228 brw_set_desc(p, insn,
2229 brw_message_desc(devinfo, msg_length, response_length,
2230 header_present) |
2231 brw_dp_write_desc(devinfo, binding_table_index, msg_control,
2232 msg_type, last_render_target,
2233 0 /* send_commit_msg */));
2234 brw_inst_set_eot(devinfo, insn, eot);
2235
2236 return insn;
2237 }
2238
2239 brw_inst *
2240 gen9_fb_READ(struct brw_codegen *p,
2241 struct brw_reg dst,
2242 struct brw_reg payload,
2243 unsigned binding_table_index,
2244 unsigned msg_length,
2245 unsigned response_length,
2246 bool per_sample)
2247 {
2248 const struct gen_device_info *devinfo = p->devinfo;
2249 assert(devinfo->gen >= 9);
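   /* Render-target-read message subtype: 0 selects SIMD16, 1 selects SIMD8. */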
2250 const unsigned msg_subtype =
2251 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2252 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2253
2254 brw_inst_set_sfid(devinfo, insn, GEN6_SFID_DATAPORT_RENDER_CACHE);
2255 brw_set_dest(p, insn, dst);
2256 brw_set_src0(p, insn, payload);
2257 brw_set_desc(
2258 p, insn,
2259 brw_message_desc(devinfo, msg_length, response_length, true) |
2260 brw_dp_read_desc(devinfo, binding_table_index,
2261 per_sample << 5 | msg_subtype,
2262 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2263 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2264 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2265
2266 return insn;
2267 }
2268
2269 /**
2270 * Texture sample instruction.
2271 * Note: the msg_type plus msg_length values determine exactly what kind
2272 * of sampling operation is performed. See volume 4, page 161 of docs.
2273 */
2274 void brw_SAMPLE(struct brw_codegen *p,
2275 struct brw_reg dest,
2276 unsigned msg_reg_nr,
2277 struct brw_reg src0,
2278 unsigned binding_table_index,
2279 unsigned sampler,
2280 unsigned msg_type,
2281 unsigned response_length,
2282 unsigned msg_length,
2283 unsigned header_present,
2284 unsigned simd_mode,
2285 unsigned return_format)
2286 {
2287 const struct gen_device_info *devinfo = p->devinfo;
2288 brw_inst *insn;
2289
2290 if (msg_reg_nr != -1)
2291 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2292
2293 insn = next_insn(p, BRW_OPCODE_SEND);
2294 brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
2295 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2296
2297 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2298 *
2299 * "Instruction compression is not allowed for this instruction (that
2300 * is, send). The hardware behavior is undefined if this instruction is
2301 * set as compressed. However, compress control can be set to "SecHalf"
2302 * to affect the EMask generation."
2303 *
2304 * No similar wording is found in later PRMs, but there are examples
2305 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2306 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2307 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2308 */
2309 brw_inst_set_compression(devinfo, insn, false);
2310
2311 if (devinfo->gen < 6)
2312 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2313
2314 brw_set_dest(p, insn, dest);
2315 brw_set_src0(p, insn, src0);
2316 brw_set_desc(p, insn,
2317 brw_message_desc(devinfo, msg_length, response_length,
2318 header_present) |
2319 brw_sampler_desc(devinfo, binding_table_index, sampler,
2320 msg_type, simd_mode, return_format));
2321 }
2322
2323 /* Adjust the message header's sampler state pointer to
2324 * select the correct group of 16 samplers.
2325 */
2326 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2327 struct brw_reg header,
2328 struct brw_reg sampler_index)
2329 {
2330 /* The "Sampler Index" field can only store values between 0 and 15.
2331 * However, we can add an offset to the "Sampler State Pointer"
2332 * field, effectively selecting a different set of 16 samplers.
2333 *
2334 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2335  * offset, and each sampler state is only 16 bytes, so we can't
2336 * exclusively use the offset - we have to use both.
2337 */
2338
2339 const struct gen_device_info *devinfo = p->devinfo;
2340
2341 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2342 const int sampler_state_size = 16; /* 16 bytes */
2343 uint32_t sampler = sampler_index.ud;
2344
2345 if (sampler >= 16) {
2346 assert(devinfo->is_haswell || devinfo->gen >= 8);
2347 brw_ADD(p,
2348 get_element_ud(header, 3),
2349 get_element_ud(brw_vec8_grf(0, 0), 3),
2350 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2351 }
2352 } else {
2353 /* Non-const sampler array indexing case */
2354 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2355 return;
2356 }
2357
2358 struct brw_reg temp = get_element_ud(header, 3);
2359
2360 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2361 brw_SHL(p, temp, temp, brw_imm_ud(4));
2362 brw_ADD(p,
2363 get_element_ud(header, 3),
2364 get_element_ud(brw_vec8_grf(0, 0), 3),
2365 temp);
2366 }
2367 }
2368
2369 /* All these variables are pretty confusing - we might be better off
2370 * using bitmasks and macros for this, in the old style. Or perhaps
2371 * just having the caller instantiate the fields in dword3 itself.
2372 */
2373 void brw_urb_WRITE(struct brw_codegen *p,
2374 struct brw_reg dest,
2375 unsigned msg_reg_nr,
2376 struct brw_reg src0,
2377 enum brw_urb_write_flags flags,
2378 unsigned msg_length,
2379 unsigned response_length,
2380 unsigned offset,
2381 unsigned swizzle)
2382 {
2383 const struct gen_device_info *devinfo = p->devinfo;
2384 brw_inst *insn;
2385
2386 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2387
2388 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2389 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2390 brw_push_insn_state(p);
2391 brw_set_default_access_mode(p, BRW_ALIGN_1);
2392 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2393 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2394 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2395 BRW_REGISTER_TYPE_UD),
2396 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2397 brw_imm_ud(0xff00));
2398 brw_pop_insn_state(p);
2399 }
2400
2401 insn = next_insn(p, BRW_OPCODE_SEND);
2402
2403 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2404
2405 brw_set_dest(p, insn, dest);
2406 brw_set_src0(p, insn, src0);
2407 brw_set_src1(p, insn, brw_imm_d(0));
2408
2409 if (devinfo->gen < 6)
2410 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2411
2412 brw_set_urb_message(p,
2413 insn,
2414 flags,
2415 msg_length,
2416 response_length,
2417 offset,
2418 swizzle);
2419 }
2420
2421 struct brw_inst *
2422 brw_send_indirect_message(struct brw_codegen *p,
2423 unsigned sfid,
2424 struct brw_reg dst,
2425 struct brw_reg payload,
2426 struct brw_reg desc,
2427 unsigned desc_imm)
2428 {
2429 const struct gen_device_info *devinfo = p->devinfo;
2430 struct brw_inst *send;
2431 int setup;
2432
2433 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2434
2435 assert(desc.type == BRW_REGISTER_TYPE_UD);
2436
2437 /* We hold on to the setup instruction (the SEND in the direct case, the OR
2438 * in the indirect case) by its index in the instruction store. The
2439 * pointer returned by next_insn() may become invalid if emitting the SEND
2440 * in the indirect case reallocs the store.
2441 */
2442
2443 if (desc.file == BRW_IMMEDIATE_VALUE) {
2444 setup = p->nr_insn;
2445 send = next_insn(p, BRW_OPCODE_SEND);
2446 brw_set_desc(p, send, desc.ud | desc_imm);
2447
2448 } else {
2449 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2450
2451 brw_push_insn_state(p);
2452 brw_set_default_access_mode(p, BRW_ALIGN_1);
2453 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2454 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2455 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2456
2457 /* Load the indirect descriptor to an address register using OR so the
2458 * caller can specify additional descriptor bits with the usual
2459 * brw_set_*_message() helper functions.
2460 */
2461 setup = p->nr_insn;
2462 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2463
2464 brw_pop_insn_state(p);
2465
2466 send = next_insn(p, BRW_OPCODE_SEND);
2467 brw_set_src1(p, send, addr);
2468 }
2469
2470 if (dst.width < BRW_EXECUTE_8)
2471 brw_inst_set_exec_size(devinfo, send, dst.width);
2472
2473 brw_set_dest(p, send, dst);
2474 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2475 brw_inst_set_sfid(devinfo, send, sfid);
2476
2477 return &p->store[setup];
2478 }
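/* Usage sketch (values hypothetical): with an immediate descriptor this
 * emits a plain SEND; with a register descriptor it first ORs the
 * descriptor into a0.0 and emits an indirect SEND reading src1 from there:
 *
 *    brw_send_indirect_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
 *                              dst, payload, desc_reg, desc_imm);
 */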
2479
2480 static struct brw_inst *
2481 brw_send_indirect_surface_message(struct brw_codegen *p,
2482 unsigned sfid,
2483 struct brw_reg dst,
2484 struct brw_reg payload,
2485 struct brw_reg surface,
2486 unsigned desc_imm)
2487 {
2488 struct brw_inst *insn;
2489
2490 if (surface.file != BRW_IMMEDIATE_VALUE) {
2491 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2492
2493 brw_push_insn_state(p);
2494 brw_set_default_access_mode(p, BRW_ALIGN_1);
2495 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2496 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2497 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2498
2499 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2500 * some surface array is accessed out of bounds.
2501 */
2502 insn = brw_AND(p, addr,
2503 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2504 BRW_GET_SWZ(surface.swizzle, 0)),
2505 brw_imm_ud(0xff));
2506
2507 brw_pop_insn_state(p);
2508
2509 surface = addr;
2510 }
2511
2512 insn = brw_send_indirect_message(p, sfid, dst, payload, surface, desc_imm);
2513
2514 return insn;
2515 }
2516
2517 static bool
2518 while_jumps_before_offset(const struct gen_device_info *devinfo,
2519 brw_inst *insn, int while_offset, int start_offset)
2520 {
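   /* Jump distances are stored in units of brw_jump_scale() per 16-byte
    * instruction; multiplying JIP by scale converts back to a byte offset.
    */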
2521 int scale = 16 / brw_jump_scale(devinfo);
2522 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2523 : brw_inst_jip(devinfo, insn);
2524 assert(jip < 0);
2525 return while_offset + jip * scale <= start_offset;
2526 }
2527
2528
2529 static int
2530 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2531 {
2532 int offset;
2533 void *store = p->store;
2534 const struct gen_device_info *devinfo = p->devinfo;
2535
2536 int depth = 0;
2537
2538 for (offset = next_offset(devinfo, store, start_offset);
2539 offset < p->next_insn_offset;
2540 offset = next_offset(devinfo, store, offset)) {
2541 brw_inst *insn = store + offset;
2542
2543 switch (brw_inst_opcode(devinfo, insn)) {
2544 case BRW_OPCODE_IF:
2545 depth++;
2546 break;
2547 case BRW_OPCODE_ENDIF:
2548 if (depth == 0)
2549 return offset;
2550 depth--;
2551 break;
2552 case BRW_OPCODE_WHILE:
2553 /* If the while doesn't jump before our instruction, it's the end
2554 * of a sibling do...while loop. Ignore it.
2555 */
2556 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2557 continue;
2558 /* fallthrough */
2559 case BRW_OPCODE_ELSE:
2560 case BRW_OPCODE_HALT:
2561 if (depth == 0)
2562 return offset;
2563 }
2564 }
2565
2566 return 0;
2567 }
2568
2569 /* There is no DO instruction on gen6, so to find the end of the loop
2570 * we have to see if the loop is jumping back before our start
2571 * instruction.
2572 */
2573 static int
2574 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2575 {
2576 const struct gen_device_info *devinfo = p->devinfo;
2577 int offset;
2578 void *store = p->store;
2579
2580 assert(devinfo->gen >= 6);
2581
2582 /* Always start after the instruction (such as a WHILE) we're trying to fix
2583 * up.
2584 */
2585 for (offset = next_offset(devinfo, store, start_offset);
2586 offset < p->next_insn_offset;
2587 offset = next_offset(devinfo, store, offset)) {
2588 brw_inst *insn = store + offset;
2589
2590 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2591 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2592 return offset;
2593 }
2594 }
2595 assert(!"not reached");
2596 return start_offset;
2597 }
2598
2599 /* After program generation, go back and update the UIP and JIP of
2600 * BREAK, CONT, and HALT instructions to their correct locations.
2601 */
2602 void
2603 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2604 {
2605 const struct gen_device_info *devinfo = p->devinfo;
2606 int offset;
2607 int br = brw_jump_scale(devinfo);
2608 int scale = 16 / br;
2609 void *store = p->store;
2610
2611 if (devinfo->gen < 6)
2612 return;
2613
2614 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2615 brw_inst *insn = store + offset;
2616 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2617
2618 int block_end_offset = brw_find_next_block_end(p, offset);
2619 switch (brw_inst_opcode(devinfo, insn)) {
2620 case BRW_OPCODE_BREAK:
2621 assert(block_end_offset != 0);
2622 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2623 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2624 brw_inst_set_uip(devinfo, insn,
2625 (brw_find_loop_end(p, offset) - offset +
2626 (devinfo->gen == 6 ? 16 : 0)) / scale);
2627 break;
2628 case BRW_OPCODE_CONTINUE:
2629 assert(block_end_offset != 0);
2630 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2631 brw_inst_set_uip(devinfo, insn,
2632 (brw_find_loop_end(p, offset) - offset) / scale);
2633
2634 assert(brw_inst_uip(devinfo, insn) != 0);
2635 assert(brw_inst_jip(devinfo, insn) != 0);
2636 break;
2637
2638 case BRW_OPCODE_ENDIF: {
2639 int32_t jump = (block_end_offset == 0) ?
2640 1 * br : (block_end_offset - offset) / scale;
2641 if (devinfo->gen >= 7)
2642 brw_inst_set_jip(devinfo, insn, jump);
2643 else
2644 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2645 break;
2646 }
2647
2648 case BRW_OPCODE_HALT:
2649 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2650 *
2651 * "In case of the halt instruction not inside any conditional
2652 * code block, the value of <JIP> and <UIP> should be the
2653 * same. In case of the halt instruction inside conditional code
2654 * block, the <UIP> should be the end of the program, and the
2655 * <JIP> should be end of the most inner conditional code block."
2656 *
2657 * The uip will have already been set by whoever set up the
2658 * instruction.
2659 */
2660 if (block_end_offset == 0) {
2661 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2662 } else {
2663 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2664 }
2665 assert(brw_inst_uip(devinfo, insn) != 0);
2666 assert(brw_inst_jip(devinfo, insn) != 0);
2667 break;
2668 }
2669 }
2670 }
2671
2672 void brw_ff_sync(struct brw_codegen *p,
2673 struct brw_reg dest,
2674 unsigned msg_reg_nr,
2675 struct brw_reg src0,
2676 bool allocate,
2677 unsigned response_length,
2678 bool eot)
2679 {
2680 const struct gen_device_info *devinfo = p->devinfo;
2681 brw_inst *insn;
2682
2683 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2684
2685 insn = next_insn(p, BRW_OPCODE_SEND);
2686 brw_set_dest(p, insn, dest);
2687 brw_set_src0(p, insn, src0);
2688 brw_set_src1(p, insn, brw_imm_d(0));
2689
2690 if (devinfo->gen < 6)
2691 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2692
2693 brw_set_ff_sync_message(p,
2694 insn,
2695 allocate,
2696 response_length,
2697 eot);
2698 }
2699
2700 /**
2701 * Emit the SEND instruction necessary to generate stream output data on Gen6
2702 * (for transform feedback).
2703 *
2704 * If send_commit_msg is true, this is the last piece of stream output data
2705 * from this thread, so send the data as a committed write. According to the
2706 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2707 *
2708 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2709 * writes are complete by sending the final write as a committed write."
2710 */
2711 void
2712 brw_svb_write(struct brw_codegen *p,
2713 struct brw_reg dest,
2714 unsigned msg_reg_nr,
2715 struct brw_reg src0,
2716 unsigned binding_table_index,
2717 bool send_commit_msg)
2718 {
2719 const struct gen_device_info *devinfo = p->devinfo;
2720 const unsigned target_cache =
2721 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2722 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2723 BRW_SFID_DATAPORT_WRITE);
2724 brw_inst *insn;
2725
2726 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2727
2728 insn = next_insn(p, BRW_OPCODE_SEND);
2729 brw_inst_set_sfid(devinfo, insn, target_cache);
2730 brw_set_dest(p, insn, dest);
2731 brw_set_src0(p, insn, src0);
2732 brw_set_desc(p, insn,
2733 brw_message_desc(devinfo, 1, send_commit_msg, true) |
2734 brw_dp_write_desc(devinfo, binding_table_index,
2735 0, /* msg_control: ignored */
2736 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2737 0, /* last_render_target: ignored */
2738 send_commit_msg)); /* send_commit_msg */
2739 }
2740
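/**
 * Number of registers a surface-message response occupies: one register in
 * SIMD4x2 (Align16) mode, otherwise one per channel, doubled for SIMD16.
 */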
2741 static unsigned
2742 brw_surface_payload_size(struct brw_codegen *p,
2743 unsigned num_channels,
2744 bool has_simd4x2,
2745 bool has_simd16)
2746 {
2747 if (has_simd4x2 && brw_get_default_access_mode(p) == BRW_ALIGN_16)
2748 return 1;
2749 else if (has_simd16 && brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2750 return 2 * num_channels;
2751 else
2752 return num_channels;
2753 }
2754
2755 static void
2756 brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
2757 brw_inst *insn,
2758 unsigned atomic_op,
2759 bool response_expected)
2760 {
2761 const struct gen_device_info *devinfo = p->devinfo;
2762 unsigned msg_control =
2763 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2764 (response_expected ? 1 << 5 : 0); /* Return data expected */
2765
2766 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2767 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2768 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2769 msg_control |= 1 << 4; /* SIMD8 mode */
2770
2771 brw_inst_set_dp_msg_type(devinfo, insn,
2772 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2773 } else {
2774 brw_inst_set_dp_msg_type(devinfo, insn,
2775 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2776 }
2777 } else {
2778 brw_inst_set_dp_msg_type(devinfo, insn,
2779 GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2780
2781 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2782 msg_control |= 1 << 4; /* SIMD8 mode */
2783 }
2784
2785 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2786 }
2787
2788 void
2789 brw_untyped_atomic(struct brw_codegen *p,
2790 struct brw_reg dst,
2791 struct brw_reg payload,
2792 struct brw_reg surface,
2793 unsigned atomic_op,
2794 unsigned msg_length,
2795 bool response_expected,
2796 bool header_present)
2797 {
2798 const struct gen_device_info *devinfo = p->devinfo;
2799 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2800 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2801 GEN7_SFID_DATAPORT_DATA_CACHE);
2802 const unsigned response_length = brw_surface_payload_size(
2803 p, response_expected, devinfo->gen >= 8 || devinfo->is_haswell, true);
2804 const unsigned desc =
2805 brw_message_desc(devinfo, msg_length, response_length, header_present);
2806 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2807 /* Mask out unused components -- This is especially important in Align16
2808 * mode on generations that don't have native support for SIMD4x2 atomics,
2809 * because unused but enabled components will cause the dataport to perform
2810 * additional atomic operations on the addresses that happen to be in the
2811 * uninitialized Y, Z and W coordinates of the payload.
2812 */
2813 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2814 struct brw_inst *insn = brw_send_indirect_surface_message(
2815 p, sfid, brw_writemask(dst, mask), payload, surface, desc);
2816
2817 brw_set_dp_untyped_atomic_message(
2818 p, insn, atomic_op, response_expected);
2819 }
2820
2821 static void
2822 brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
2823 struct brw_inst *insn,
2824 unsigned num_channels)
2825 {
2826 const struct gen_device_info *devinfo = p->devinfo;
2827    /* Set mask of 32-bit channels to drop: num_channels == 2 gives 0xc (drop Z/W). */
2828 unsigned msg_control = 0xf & (0xf << num_channels);
2829
2830 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2831 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2832 msg_control |= 1 << 4; /* SIMD16 mode */
2833 else
2834 msg_control |= 2 << 4; /* SIMD8 mode */
2835 }
2836
2837 brw_inst_set_dp_msg_type(devinfo, insn,
2838 (devinfo->gen >= 8 || devinfo->is_haswell ?
2839 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2840 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
2841 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2842 }
2843
2844 void
2845 brw_untyped_surface_read(struct brw_codegen *p,
2846 struct brw_reg dst,
2847 struct brw_reg payload,
2848 struct brw_reg surface,
2849 unsigned msg_length,
2850 unsigned num_channels)
2851 {
2852 const struct gen_device_info *devinfo = p->devinfo;
2853 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2854 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2855 GEN7_SFID_DATAPORT_DATA_CACHE);
2856 const unsigned response_length =
2857 brw_surface_payload_size(p, num_channels, true, true);
2858 const unsigned desc =
2859 brw_message_desc(devinfo, msg_length, response_length, false);
2860 struct brw_inst *insn = brw_send_indirect_surface_message(
2861 p, sfid, dst, payload, surface, desc);
2862
2863 brw_set_dp_untyped_surface_read_message(
2864 p, insn, num_channels);
2865 }
2866
2867 static void
2868 brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
2869 struct brw_inst *insn,
2870 unsigned num_channels)
2871 {
2872 const struct gen_device_info *devinfo = p->devinfo;
2873 /* Set mask of 32-bit channels to drop. */
2874 unsigned msg_control = 0xf & (0xf << num_channels);
2875
2876 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2877 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2878 msg_control |= 1 << 4; /* SIMD16 mode */
2879 else
2880 msg_control |= 2 << 4; /* SIMD8 mode */
2881 } else {
2882 if (devinfo->gen >= 8 || devinfo->is_haswell)
2883 msg_control |= 0 << 4; /* SIMD4x2 mode */
2884 else
2885 msg_control |= 2 << 4; /* SIMD8 mode */
2886 }
2887
2888 brw_inst_set_dp_msg_type(devinfo, insn,
2889 devinfo->gen >= 8 || devinfo->is_haswell ?
2890 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
2891 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
2892 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2893 }
2894
2895 void
2896 brw_untyped_surface_write(struct brw_codegen *p,
2897 struct brw_reg payload,
2898 struct brw_reg surface,
2899 unsigned msg_length,
2900 unsigned num_channels,
2901 bool header_present)
2902 {
2903 const struct gen_device_info *devinfo = p->devinfo;
2904 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2905 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2906 GEN7_SFID_DATAPORT_DATA_CACHE);
2907 const unsigned desc =
2908 brw_message_desc(devinfo, msg_length, 0, header_present);
2909 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2910 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2911 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
2912 WRITEMASK_X : WRITEMASK_XYZW;
2913 struct brw_inst *insn = brw_send_indirect_surface_message(
2914 p, sfid, brw_writemask(brw_null_reg(), mask),
2915 payload, surface, desc);
2916
2917 brw_set_dp_untyped_surface_write_message(
2918 p, insn, num_channels);
2919 }
2920
2921 static unsigned
2922 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
2923 {
2924 switch (bit_size) {
2925 case 8:
2926 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
2927 case 16:
2928 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
2929 case 32:
2930 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
2931 default:
2932 unreachable("Unsupported bit_size for byte scattered messages");
2933 }
2934 }
2935
2936
2937 void
2938 brw_byte_scattered_read(struct brw_codegen *p,
2939 struct brw_reg dst,
2940 struct brw_reg payload,
2941 struct brw_reg surface,
2942 unsigned msg_length,
2943 unsigned bit_size)
2944 {
2945 const struct gen_device_info *devinfo = p->devinfo;
2946 assert(devinfo->gen > 7 || devinfo->is_haswell);
2947 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2948 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
2949 const unsigned response_length =
2950 brw_surface_payload_size(p, 1, true, true);
2951 const unsigned desc =
2952 brw_message_desc(devinfo, msg_length, response_length, false);
2953
2954 struct brw_inst *insn = brw_send_indirect_surface_message(
2955 p, sfid, dst, payload, surface, desc);
2956
2957 unsigned msg_control =
2958 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
2959
2960 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2961 msg_control |= 1; /* SIMD16 mode */
2962 else
2963 msg_control |= 0; /* SIMD8 mode */
2964
2965 brw_inst_set_dp_msg_type(devinfo, insn,
2966 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
2967 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2968 }
2969
2970 void
2971 brw_byte_scattered_write(struct brw_codegen *p,
2972 struct brw_reg payload,
2973 struct brw_reg surface,
2974 unsigned msg_length,
2975 unsigned bit_size,
2976 bool header_present)
2977 {
2978 const struct gen_device_info *devinfo = p->devinfo;
2979 assert(devinfo->gen > 7 || devinfo->is_haswell);
2980 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2981 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
2982 const unsigned desc =
2983 brw_message_desc(devinfo, msg_length, 0, header_present);
2984
2985 struct brw_inst *insn = brw_send_indirect_surface_message(
2986 p, sfid, brw_writemask(brw_null_reg(), WRITEMASK_XYZW),
2987 payload, surface, desc);
2988
2989 unsigned msg_control =
2990 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
2991
2992 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2993 msg_control |= 1;
2994 else
2995 msg_control |= 0;
2996
2997 brw_inst_set_dp_msg_type(devinfo, insn,
2998 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
2999 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3000 }
3001
3002 static void
3003 brw_set_dp_typed_atomic_message(struct brw_codegen *p,
3004 struct brw_inst *insn,
3005 unsigned atomic_op,
3006 bool response_expected)
3007 {
3008 const struct gen_device_info *devinfo = p->devinfo;
3009 unsigned msg_control =
3010 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3011 (response_expected ? 1 << 5 : 0); /* Return data expected */
3012
3013 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3014 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3015 if ((brw_get_default_group(p) / 8) % 2 == 1)
3016 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3017
3018 brw_inst_set_dp_msg_type(devinfo, insn,
3019 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
3020 } else {
3021 brw_inst_set_dp_msg_type(devinfo, insn,
3022 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
3023 }
3024
3025 } else {
3026 brw_inst_set_dp_msg_type(devinfo, insn,
3027 GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);
3028
3029 if ((brw_get_default_group(p) / 8) % 2 == 1)
3030 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3031 }
3032
3033 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3034 }
3035
3036 void
3037 brw_typed_atomic(struct brw_codegen *p,
3038 struct brw_reg dst,
3039 struct brw_reg payload,
3040 struct brw_reg surface,
3041 unsigned atomic_op,
3042 unsigned msg_length,
3043 bool response_expected,
3044 bool header_present) {
3045 const struct gen_device_info *devinfo = p->devinfo;
3046 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3047 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3048 GEN6_SFID_DATAPORT_RENDER_CACHE);
3049 const unsigned response_length = brw_surface_payload_size(
3050 p, response_expected, devinfo->gen >= 8 || devinfo->is_haswell, false);
3051 const unsigned desc =
3052 brw_message_desc(devinfo, msg_length, response_length, header_present);
3053 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3054 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3055 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3056 struct brw_inst *insn = brw_send_indirect_surface_message(
3057 p, sfid, brw_writemask(dst, mask), payload, surface, desc);
3058
3059 brw_set_dp_typed_atomic_message(
3060 p, insn, atomic_op, response_expected);
3061 }
3062
3063 static void
3064 brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
3065 struct brw_inst *insn,
3066 unsigned num_channels)
3067 {
3068 const struct gen_device_info *devinfo = p->devinfo;
3069 /* Set mask of unused channels. */
3070 unsigned msg_control = 0xf & (0xf << num_channels);
3071
3072 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3073 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3074 if ((brw_get_default_group(p) / 8) % 2 == 1)
3075 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3076 else
3077 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3078 }
3079
3080 brw_inst_set_dp_msg_type(devinfo, insn,
3081 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
3082 } else {
3083 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3084 if ((brw_get_default_group(p) / 8) % 2 == 1)
3085 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3086 }
3087
3088 brw_inst_set_dp_msg_type(devinfo, insn,
3089 GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
3090 }
3091
3092 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3093 }
3094
3095 void
3096 brw_typed_surface_read(struct brw_codegen *p,
3097 struct brw_reg dst,
3098 struct brw_reg payload,
3099 struct brw_reg surface,
3100 unsigned msg_length,
3101 unsigned num_channels,
3102 bool header_present)
3103 {
3104 const struct gen_device_info *devinfo = p->devinfo;
3105 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3106 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3107 GEN6_SFID_DATAPORT_RENDER_CACHE);
3108 const unsigned response_length = brw_surface_payload_size(
3109 p, num_channels, devinfo->gen >= 8 || devinfo->is_haswell, false);
3110 const unsigned desc =
3111 brw_message_desc(devinfo, msg_length, response_length, header_present);
3112 struct brw_inst *insn = brw_send_indirect_surface_message(
3113 p, sfid, dst, payload, surface, desc);
3114
3115 brw_set_dp_typed_surface_read_message(
3116 p, insn, num_channels);
3117 }
3118
3119 static void
3120 brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
3121 struct brw_inst *insn,
3122 unsigned num_channels)
3123 {
3124 const struct gen_device_info *devinfo = p->devinfo;
3125 /* Set mask of unused channels. */
3126 unsigned msg_control = 0xf & (0xf << num_channels);
3127
3128 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3129 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3130 if ((brw_get_default_group(p) / 8) % 2 == 1)
3131 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3132 else
3133 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3134 }
3135
3136 brw_inst_set_dp_msg_type(devinfo, insn,
3137 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);
3138
3139 } else {
3140 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3141 if ((brw_get_default_group(p) / 8) % 2 == 1)
3142 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3143 }
3144
3145 brw_inst_set_dp_msg_type(devinfo, insn,
3146 GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
3147 }
3148
3149 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3150 }
3151
3152 void
3153 brw_typed_surface_write(struct brw_codegen *p,
3154 struct brw_reg payload,
3155 struct brw_reg surface,
3156 unsigned msg_length,
3157 unsigned num_channels,
3158 bool header_present)
3159 {
3160 const struct gen_device_info *devinfo = p->devinfo;
3161 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3162 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3163 GEN6_SFID_DATAPORT_RENDER_CACHE);
3164 const unsigned desc =
3165 brw_message_desc(devinfo, msg_length, 0, header_present);
3166 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3167 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3168 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3169 WRITEMASK_X : WRITEMASK_XYZW);
3170 struct brw_inst *insn = brw_send_indirect_surface_message(
3171 p, sfid, brw_writemask(brw_null_reg(), mask),
3172 payload, surface, desc);
3173
3174 brw_set_dp_typed_surface_write_message(
3175 p, insn, num_channels);
3176 }
3177
3178 static void
3179 brw_set_memory_fence_message(struct brw_codegen *p,
3180 struct brw_inst *insn,
3181 enum brw_message_target sfid,
3182 bool commit_enable)
3183 {
3184 const struct gen_device_info *devinfo = p->devinfo;
3185
3186 brw_set_desc(p, insn, brw_message_desc(
3187 devinfo, 1, (commit_enable ? 1 : 0), true));
3188
3189 brw_inst_set_sfid(devinfo, insn, sfid);
3190
3191 switch (sfid) {
3192 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3193 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3194 break;
3195 case GEN7_SFID_DATAPORT_DATA_CACHE:
3196 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3197 break;
3198 default:
3199 unreachable("Not reached");
3200 }
3201
3202 if (commit_enable)
3203 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3204 }
3205
3206 void
3207 brw_memory_fence(struct brw_codegen *p,
3208 struct brw_reg dst,
3209 enum opcode send_op)
3210 {
3211 const struct gen_device_info *devinfo = p->devinfo;
3212 const bool commit_enable =
3213 devinfo->gen >= 10 || /* HSD ES # 1404612949 */
3214 (devinfo->gen == 7 && !devinfo->is_haswell);
3215 struct brw_inst *insn;
3216
3217 brw_push_insn_state(p);
3218 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3219 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3220 dst = vec1(dst);
3221
3222    /* Set dst as the destination for dependency tracking; the MEMORY_FENCE
3223     * message doesn't write anything back.
3224 */
3225 insn = next_insn(p, send_op);
3226 dst = retype(dst, BRW_REGISTER_TYPE_UW);
3227 brw_set_dest(p, insn, dst);
3228 brw_set_src0(p, insn, dst);
3229 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3230 commit_enable);
3231
3232 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3233 /* IVB does typed surface access through the render cache, so we need to
3234 * flush it too. Use a different register so both flushes can be
3235 * pipelined by the hardware.
3236 */
3237 insn = next_insn(p, send_op);
3238 brw_set_dest(p, insn, offset(dst, 1));
3239 brw_set_src0(p, insn, offset(dst, 1));
3240 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3241 commit_enable);
3242
3243 /* Now write the response of the second message into the response of the
3244 * first to trigger a pipeline stall -- This way future render and data
3245 * cache messages will be properly ordered with respect to past data and
3246 * render cache messages.
3247 */
3248 brw_MOV(p, dst, offset(dst, 1));
3249 }
3250
3251 brw_pop_insn_state(p);
3252 }

void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const uint16_t exec_size = brw_get_default_exec_size(p);
   const unsigned slot_group = brw_get_default_group(p) / 16;
   const unsigned simd_mode = (exec_size == BRW_EXECUTE_16);
   const unsigned desc =
      brw_message_desc(devinfo, msg_length, response_length, false) |
      brw_pixel_interp_desc(devinfo, mode, noperspective, simd_mode,
                            slot_group);

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually immediate.
    */
   brw_send_indirect_message(p,
                             GEN7_SFID_PIXEL_INTERPOLATOR,
                             dest,
                             mrf,
                             vec1(data),
                             desc);
}
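
/* Illustrative sketch, not part of the original file: emitting a centroid
 * pixel-interpolator query.  The message and response lengths here are
 * assumptions for the example; real callers derive them from the dispatch
 * width and the payload layout.
 */
static void
example_pi_centroid_query(struct brw_codegen *p, struct brw_reg dst,
                          struct brw_reg payload)
{
   brw_pixel_interpolator_query(p, dst, payload,
                                false, /* perspective interpolation */
                                GEN7_PIXEL_INTERPOLATOR_LOC_CENTROID,
                                brw_imm_ud(0), /* no extra message data */
                                1,  /* msg_length (assumed) */
                                2   /* response_length (assumed) */);
}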

void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_get_default_exec_size(p);
   const unsigned qtr_control = brw_get_default_group(p) / 8;
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just
          * find the first bit set in the execution mask.  The register
          * exists on HSW already but it reads back as all ones when the
          * current instruction has execution masking disabled, so it's
          * kind of useless there.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's not
             * tightly packed (i.e. it doesn't have the form '2^n - 1' for
             * some n).  Combine ce0 with the given dispatch (or vector)
             * mask to mask off those channels which were never dispatched
             * by the hardware.
             */
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }

         /* Quarter control has the effect of magically shifting the value
          * of ce0 so you'll get the first active channel relative to the
          * specified quarter control as result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
      } else {
         const struct brw_reg flag = brw_flag_reg(p->current->flag_subreg / 2,
                                                  p->current->flag_subreg % 2);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking
          * and a conditional modifier enabled in order to get the full
          * execution mask in f1.0.  We could use a single 32-wide move here
          * if it weren't for the hardware bug that causes channel enables
          * to be applied incorrectly to the second half of 32-wide
          * instructions on Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }

         /* Find the first bit set in the exec_size-wide portion of the
          * flag register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.  Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination without and with execution masking to
          * find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}
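
/* Host-side illustration of what the FBL-based paths above compute: the
 * index of the lowest set bit of the (shifted) execution mask.  This is an
 * explanatory sketch, not driver code; per the PRMs, FBL yields all ones
 * when no bit is set, which is mirrored here as an assumption.
 */
static uint32_t
example_first_live_channel(uint32_t exec_mask, unsigned qtr_control)
{
   const uint32_t shifted = exec_mask >> (qtr_control * 8);

   for (unsigned i = 0; i < 32; i++) {
      if (shifted & (1u << i))
         return i;
   }
   return ~0u;
}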

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset.  The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address.  Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, account for the difference if the source
          * register is above this limit.
          */
         if (offset >= limit) {
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation
             *    is integer DWord multiply, indirect addressing must not
             *    be used."
             *
             * To work around this restriction, we do two integer MOVs
             * instead of one 64-bit MOV.  Because no double value should
             * ever cross a register boundary, it's safe to use the
             * immediate offset in the indirect here to handle adding 4
             * bytes to the offset and avoid the extra ADD to the register
             * file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_vec1_indirect(addr.subnr, offset),
                           BRW_REGISTER_TYPE_D));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_vec1_indirect(addr.subnr, offset + 4),
                           BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can be either zero or one, replicate
          * it to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}
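
/* Worked illustration of the offset split in brw_broadcast() above, as a
 * sketch rather than driver code: with the 512-byte indirect-immediate
 * limit, a source at g70 gives offset = 70 * 32 = 2240, so the ADD folds
 * 2048 into the address register and the remaining 192 bytes travel in the
 * MOV's address immediate.
 */
static void
example_split_indirect_offset(unsigned offset,
                              unsigned *addr_bias, unsigned *addr_imm)
{
   const unsigned limit = 512; /* matches the limit in brw_broadcast() */

   /* Everything above the limit goes into the address register; the
    * remainder fits in the signed indirect addressing immediate.
    */
   *addr_bias = offset >= limit ? offset - offset % limit : 0;
   *addr_imm = offset % limit;
}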

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we want to just
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_src1(p, send, brw_imm_ud(0));
   brw_set_desc(p, send, brw_message_desc(devinfo, 2, 0, false));
   brw_inst_set_sfid(devinfo, send, sfid);
   brw_inst_set_binding_table_index(devinfo, send, surf_index);
   brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);

   brw_pop_insn_state(p);
}
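
/* Illustrative sketch, not part of the original file: the payload is
 * expected to span two registers (matching the msg_length of 2 above),
 * carrying the buffer offset and the value to add.  The surface index used
 * here is hypothetical.
 */
static void
example_record_shader_time(struct brw_codegen *p, struct brw_reg payload)
{
   const uint32_t example_surf_index = 0; /* hypothetical BT slot */

   brw_shader_time_add(p, payload, example_surf_index);
}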


/**
 * Emit the SEND message for a barrier.
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());
   brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier.
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}
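
/* Illustrative sketch, not part of the original file: a compute-style
 * barrier is the gateway SEND from brw_barrier() followed by a WAIT on the
 * notification register.  Using r0 as the message source is an assumption;
 * the real payload must carry the barrier ID in its header.
 */
static void
example_barrier_and_wait(struct brw_codegen *p)
{
   brw_barrier(p, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_WAIT(p);
}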

/**
 * Changes the floating point rounding mode by updating the control register
 * field defined at cr0.0[5-6] bits.  This function supports changing among
 * RTNE (00), RU (01), RD (10) and RTZ (11) rounding using bitwise
 * operations.  Only RTNE and RTZ rounding are enabled at the NIR level.
 */
void
brw_rounding_mode(struct brw_codegen *p,
                  enum brw_rnd_mode mode)
{
   const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;

   /* When the new field is all ones (RTZ) the OR below already produces
    * the right value, so the clearing AND can be skipped.
    */
   if (bits != BRW_CR0_RND_MODE_MASK) {
      brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                               brw_imm_ud(~BRW_CR0_RND_MODE_MASK));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);

      /* From the Skylake PRM, Volume 7, page 760:
       *
       *    "Implementation Restriction on Register Access: When the control
       *     register is used as an explicit source and/or destination,
       *     hardware does not ensure execution pipeline coherency.  Software
       *     must set the thread control field to 'switch' for an instruction
       *     that uses control register as an explicit operand."
       */
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }

   /* Likewise, when the new field is all zeros (RTNE) the AND above already
    * cleared it, so the OR can be skipped.
    */
   if (bits) {
      brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                              brw_imm_ud(bits));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }
}
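
/* Worked illustration of the cr0 update in brw_rounding_mode(), as a plain
 * C sketch rather than driver code: clear the rounding field with an AND,
 * then OR in the new mode bits, exactly mirroring the two instructions
 * emitted above.
 */
static uint32_t
example_cr0_with_rounding(uint32_t cr0, enum brw_rnd_mode mode)
{
   cr0 &= ~BRW_CR0_RND_MODE_MASK;            /* clear the rounding field */
   cr0 |= mode << BRW_CR0_RND_MODE_SHIFT;    /* install the new mode */
   return cr0;
}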