intel/eu: Stop overriding exec sizes in send_indirect_message
src/intel/compiler/brw_eu_emit.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
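
/* Illustrative usage sketch (not part of the upstream file): a Gen6+ SEND
 * emitter would typically resolve the implied move before building the
 * message.  The register and MRF numbers here are invented for the example.
 *
 *    struct brw_reg payload = brw_vec8_grf(1, 0);
 *    gen6_resolve_implied_move(p, &payload, 2);
 *    // payload now names m2; brw_set_src0() will later remap it into the
 *    // GRF space reserved by the Gen7 MRF hack below.
 */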

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
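
/* Worked example (an illustrative note, not upstream code): assuming
 * GEN7_MRF_HACK_START is 112, matching the R112-R127 range quoted above,
 * a register built as m4 is remapped to g116 on Gen7+:
 *
 *    struct brw_reg r = brw_message_reg(4);   // m4
 *    gen7_convert_mrf_to_grf(p, &r);          // now g116 (4 + 112)
 */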

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file == BRW_GENERAL_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          *    Although Dst.HorzStride is a don't care for Align16, HW needs
          *    this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Even though it's ignored in DA16 mode, it still needs to be
          * programmed as '01'.
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * On platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16.  In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}
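
/* Illustrative note (not upstream code): with automatic_exec_sizes enabled
 * and a default exec size of SIMD8, a scalar destination shrinks the
 * instruction on its own:
 *
 *    brw_MOV(p, brw_vec1_grf(10, 0), brw_imm_ud(0));
 *    // dest.width is BRW_EXECUTE_1 (< BRW_EXECUTE_4 on Gen6+), so the MOV
 *    // is emitted with an exec size of 1 instead of the default 8.
 */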

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file == BRW_GENERAL_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity arising from the fact that we use the same
             * register descriptions in align_16 as in align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_GENERAL_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be an immediate in two-argument instructions. */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity arising from the fact that we use the same
             * register descriptions in align_16 as in align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Specify the descriptor and extended descriptor immediate for a SEND(C)
 * message instruction.
 */
void
brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
                unsigned desc, unsigned ex_desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC);
   brw_inst_set_src1_file_type(devinfo, inst,
                               BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_UD);
   brw_inst_set_send_desc(devinfo, inst, desc);
   if (devinfo->gen >= 9)
      brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
}
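
/* Illustrative note (not upstream code): callers normally build the
 * descriptor with a helper such as brw_message_desc() and pass it in; e.g.
 * for a header-less message with mlen 2 and rlen 1:
 *
 *    brw_set_desc_ex(p, inst,
 *                    brw_message_desc(devinfo, 2, 1, false),
 *                    0);   // extended descriptor, used on Gen9+ only
 */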

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}

static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, mlen, rlen, header_present));

   brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
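
/* Worked example (an illustrative note, not upstream code): the scratch
 * block size field encodes the register count differently per generation.
 * For num_regs == 4:
 *
 *    Gen7:  block_size = num_regs - 1      = 3
 *    Gen8+: block_size = _mesa_logbase2(4) = 2
 */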

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}
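
/* Illustrative note (not upstream code): since the store may be
 * reralloc'ed, a brw_inst pointer taken before next_insn() can dangle
 * afterwards.  Stable references are therefore kept as indices:
 *
 *    int idx = inst - p->store;      // save position
 *    next_insn(p, BRW_OPCODE_NOP);   // may move p->store
 *    inst = &p->store[idx];          // re-derive after growth
 *
 * push_if_stack() and brw_ENDIF() below rely on exactly this pattern.
 */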

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
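
/* Worked example (an illustrative note, not upstream code): a source at
 * byte subnr 16 is encoded as 32-bit component 4, since 16 / 4 == 4.  Byte
 * offsets that aren't a multiple of 4 can't be expressed here, which is
 * fine for the F/D/UD types these instructions support.
 */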

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.file != BRW_IMMEDIATE_VALUE || src0.nr < 128);
   assert(src1.file != BRW_IMMEDIATE_VALUE || src1.nr < 128);
   assert(src2.file != BRW_IMMEDIATE_VALUE || src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD);
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types.  The MAD and LRP emitters ensure
          * that all four types are float.  The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0)                     \
{                                                           \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);         \
}

#define ALU2(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0,                     \
                   struct brw_reg src1)                     \
{                                                           \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);   \
}

#define ALU3(OP)                                                \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   if (p->current->access_mode == BRW_ALIGN_16) {               \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)                \
         src0.swizzle = BRW_SWIZZLE_XXXX;                       \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)                \
         src1.swizzle = BRW_SWIZZLE_XXXX;                       \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)                \
         src2.swizzle = BRW_SWIZZLE_XXXX;                       \
   }                                                            \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

#define ALU3F(OP)                                               \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                   \
          dest.type == BRW_REGISTER_TYPE_DF);                   \
   if (dest.type == BRW_REGISTER_TYPE_F) {                      \
      assert(src0.type == BRW_REGISTER_TYPE_F);                 \
      assert(src1.type == BRW_REGISTER_TYPE_F);                 \
      assert(src2.type == BRW_REGISTER_TYPE_F);                 \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {              \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                \
   }                                                            \
                                                                \
   if (p->current->access_mode == BRW_ALIGN_16) {               \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)                \
         src0.swizzle = BRW_SWIZZLE_XXXX;                       \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)                \
         src1.swizzle = BRW_SWIZZLE_XXXX;                       \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)                \
         src2.swizzle = BRW_SWIZZLE_XXXX;                       \
   }                                                            \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                  \
void brw_##OP(struct brw_codegen *p,                               \
              struct brw_reg dest,                                 \
              struct brw_reg src)                                  \
{                                                                  \
   const struct gen_device_info *devinfo = p->devinfo;             \
   brw_inst *rnd, *add;                                            \
   rnd = next_insn(p, BRW_OPCODE_##OP);                            \
   brw_set_dest(p, rnd, dest);                                     \
   brw_set_src0(p, rnd, src);                                      \
                                                                   \
   if (devinfo->gen < 6) {                                         \
      /* turn on round-increments */                               \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R); \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));               \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
   }                                                               \
}
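
/* Illustrative note (not upstream code): on Gen4/5, ROUND(RNDZ) therefore
 * expands to a sequence roughly like
 *
 *    rndz.r (8)  dst  src            // sets per-channel increment flags
 *    (+f0) add (8)  dst  dst  1.0F   // predicated fixup
 *
 * while Gen6+ emits only the first instruction, as noted above.  The
 * assembly syntax here is approximate.
 */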

ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use an <X,2,0> source region to
    * read each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == src0.width + src0.hstride);
      src0.vstride = src0.hstride;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
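
/* Worked example (an illustrative note, not upstream code): for the IVB/BYT
 * F->DF workaround above, an <8;8,1>:F source region becomes <1;2,0>:F, so
 * each element is read twice and the ignored odd channels still see valid
 * data:
 *
 *    before:  g2<8;8,1>:F  reads elements 0 1 2 3 4 5 6 7
 *    after:   g2<1;2,0>:F  reads elements 0 0 1 1 2 2 3 3
 */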

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}
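
/* Illustrative note (not upstream code): in the zero-fill case the UD
 * destination is treated as a strided W region, so the conversion writes the
 * low word of each dword and a second MOV clears the high word, roughly
 * (Gen8+ form, syntax approximate):
 *
 *    mov (8)  g10<2>:hf   g2<8;8,1>:f   { NoDDClr }
 *    mov (8)  g10.1<2>:w  0x0:w         { NoDDChk }
 */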

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   Because this instruction does not have a 16-bit floating-point
       *   type, the source data type must be Word (W). The destination type
       *   must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}


/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, e.g. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}
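
/* Worked example (an illustrative note, not upstream code): with made-up
 * instruction indices if = 10, else = 14 and next (where ENDIF would be)
 * = 20, the SPF conversion above yields
 *
 *    if_inst   imm = (14 - 10 + 1) * 16 = 80   // to first inst after ELSE
 *    else_inst imm = (20 - 14) * 16     = 96   // to where ENDIF would be
 *
 * i.e. jump distances are byte offsets in 16-byte instruction units added
 * to IP.
 */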

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP.  On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /*
    * A single next_insn() may change the base address of the instruction
    * store (p->store), so call it first, before converting any saved
    * indices back into instruction store pointers.
    */
1499 if (emit_endif)
1500 insn = next_insn(p, BRW_OPCODE_ENDIF);
1501
1502 /* Pop the IF and (optional) ELSE instructions from the stack */
1503 p->if_depth_in_loop[p->loop_stack_depth]--;
1504 tmp = pop_if_stack(p);
1505 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1506 else_inst = tmp;
1507 tmp = pop_if_stack(p);
1508 }
1509 if_inst = tmp;
1510
1511 if (!emit_endif) {
1512 /* ENDIF is useless; don't bother emitting it. */
1513 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1514 return;
1515 }
1516
1517 if (devinfo->gen < 6) {
1518 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1519 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1520 brw_set_src1(p, insn, brw_imm_d(0x0));
1521 } else if (devinfo->gen == 6) {
1522 brw_set_dest(p, insn, brw_imm_w(0));
1523 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1524 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1525 } else if (devinfo->gen == 7) {
1526 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1527 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1528 brw_set_src1(p, insn, brw_imm_w(0));
1529 } else {
1530 brw_set_src0(p, insn, brw_imm_d(0));
1531 }
1532
1533 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1534 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1535 if (devinfo->gen < 6)
1536 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1537
1538 /* Also pop item off the stack in the endif instruction: */
1539 if (devinfo->gen < 6) {
1540 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1541 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1542 } else if (devinfo->gen == 6) {
1543 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1544 } else {
1545 brw_inst_set_jip(devinfo, insn, 2);
1546 }
1547 patch_IF_ELSE(p, if_inst, else_inst, insn);
1548 }
1549
1550 brw_inst *
1551 brw_BREAK(struct brw_codegen *p)
1552 {
1553 const struct gen_device_info *devinfo = p->devinfo;
1554 brw_inst *insn;
1555
1556 insn = next_insn(p, BRW_OPCODE_BREAK);
1557 if (devinfo->gen >= 8) {
1558 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1559 brw_set_src0(p, insn, brw_imm_d(0x0));
1560 } else if (devinfo->gen >= 6) {
1561 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1562 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1563 brw_set_src1(p, insn, brw_imm_d(0x0));
1564 } else {
1565 brw_set_dest(p, insn, brw_ip_reg());
1566 brw_set_src0(p, insn, brw_ip_reg());
1567 brw_set_src1(p, insn, brw_imm_d(0x0));
1568 brw_inst_set_gen4_pop_count(devinfo, insn,
1569 p->if_depth_in_loop[p->loop_stack_depth]);
1570 }
1571 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1572 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1573
1574 return insn;
1575 }
1576
1577 brw_inst *
1578 brw_CONT(struct brw_codegen *p)
1579 {
1580 const struct gen_device_info *devinfo = p->devinfo;
1581 brw_inst *insn;
1582
1583 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1584 brw_set_dest(p, insn, brw_ip_reg());
1585 if (devinfo->gen >= 8) {
1586 brw_set_src0(p, insn, brw_imm_d(0x0));
1587 } else {
1588 brw_set_src0(p, insn, brw_ip_reg());
1589 brw_set_src1(p, insn, brw_imm_d(0x0));
1590 }
1591
1592 if (devinfo->gen < 6) {
1593 brw_inst_set_gen4_pop_count(devinfo, insn,
1594 p->if_depth_in_loop[p->loop_stack_depth]);
1595 }
1596 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1597 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1598 return insn;
1599 }
1600
1601 brw_inst *
1602 gen6_HALT(struct brw_codegen *p)
1603 {
1604 const struct gen_device_info *devinfo = p->devinfo;
1605 brw_inst *insn;
1606
1607 insn = next_insn(p, BRW_OPCODE_HALT);
1608 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1609 if (devinfo->gen >= 8) {
1610 brw_set_src0(p, insn, brw_imm_d(0x0));
1611 } else {
1612 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1613 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1614 }
1615
1616 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1617 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1618 return insn;
1619 }
1620
1621 /* DO/WHILE loop:
1622 *
1623 * The DO/WHILE is just an unterminated loop -- break or continue are
1624 * used for control within the loop. There are a few ways it can be
1625 * implemented.
1626 *
1627 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1628 * jip and no DO instruction.
1629 *
1630 * For non-uniform control flow pre-gen6, there's a DO instruction to
1631 * push the mask, and a WHILE to jump back, and BREAK to get out and
1632 * pop the mask.
1633 *
1634 * For gen6, there's no more mask stack, so no need for DO. WHILE
1635 * just points back to the first instruction of the loop.
1636 */
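/* A typical gen6+ emission sequence then looks roughly like this sketch:
 *
 *    brw_DO(p, BRW_EXECUTE_8);    (emits nothing, just marks the loop top)
 *    ... loop body, with brw_BREAK() / brw_CONT() as needed ...
 *    brw_WHILE(p);                (its JIP points back at the loop top)
 */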
1637 brw_inst *
1638 brw_DO(struct brw_codegen *p, unsigned execute_size)
1639 {
1640 const struct gen_device_info *devinfo = p->devinfo;
1641
1642 if (devinfo->gen >= 6 || p->single_program_flow) {
1643 push_loop_stack(p, &p->store[p->nr_insn]);
1644 return &p->store[p->nr_insn];
1645 } else {
1646 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1647
1648 push_loop_stack(p, insn);
1649
1650 /* Override the defaults for this instruction:
1651 */
1652 brw_set_dest(p, insn, brw_null_reg());
1653 brw_set_src0(p, insn, brw_null_reg());
1654 brw_set_src1(p, insn, brw_null_reg());
1655
1656 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1657 brw_inst_set_exec_size(devinfo, insn, execute_size);
1658 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1659
1660 return insn;
1661 }
1662 }
1663
1664 /**
1665 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1666 * instruction here.
1667 *
1668 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1669 * nesting, since it can always just point to the end of the block/current loop.
1670 */
1671 static void
1672 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1673 {
1674 const struct gen_device_info *devinfo = p->devinfo;
1675 brw_inst *do_inst = get_inner_do_insn(p);
1676 brw_inst *inst;
1677 unsigned br = brw_jump_scale(devinfo);
1678
1679 assert(devinfo->gen < 6);
1680
1681 for (inst = while_inst - 1; inst != do_inst; inst--) {
1682 /* If the jump count is != 0, this instruction has already been
1683 * patched because it's part of a loop inside the one we're
1684 * patching.
1685 */
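      /* Worked example (Gen5, where br == 2): a BREAK sitting three
       * instructions before the WHILE gets jump count 2 * (3 + 1) = 8 and
       * lands one instruction past the WHILE, while a CONTINUE at the same
       * spot gets 2 * 3 = 6 and lands on the WHILE itself.
       */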
1686 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1687 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1688 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1689 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1690 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1691 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1692 }
1693 }
1694 }
1695
1696 brw_inst *
1697 brw_WHILE(struct brw_codegen *p)
1698 {
1699 const struct gen_device_info *devinfo = p->devinfo;
1700 brw_inst *insn, *do_insn;
1701 unsigned br = brw_jump_scale(devinfo);
1702
1703 if (devinfo->gen >= 6) {
1704 insn = next_insn(p, BRW_OPCODE_WHILE);
1705 do_insn = get_inner_do_insn(p);
1706
1707 if (devinfo->gen >= 8) {
1708 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1709 brw_set_src0(p, insn, brw_imm_d(0));
1710 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1711 } else if (devinfo->gen == 7) {
1712 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1713 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1714 brw_set_src1(p, insn, brw_imm_w(0));
1715 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1716 } else {
1717 brw_set_dest(p, insn, brw_imm_w(0));
1718 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1719 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1720 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1721 }
1722
1723 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1724
1725 } else {
1726 if (p->single_program_flow) {
1727 insn = next_insn(p, BRW_OPCODE_ADD);
1728 do_insn = get_inner_do_insn(p);
1729
1730 brw_set_dest(p, insn, brw_ip_reg());
1731 brw_set_src0(p, insn, brw_ip_reg());
1732 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1733 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1734 } else {
1735 insn = next_insn(p, BRW_OPCODE_WHILE);
1736 do_insn = get_inner_do_insn(p);
1737
1738 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1739
1740 brw_set_dest(p, insn, brw_ip_reg());
1741 brw_set_src0(p, insn, brw_ip_reg());
1742 brw_set_src1(p, insn, brw_imm_d(0));
1743
1744 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1745 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1746 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1747
1748 brw_patch_break_cont(p, insn);
1749 }
1750 }
1751 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1752
1753 p->loop_stack_depth--;
1754
1755 return insn;
1756 }
1757
1758 /* FORWARD JUMPS:
1759 */
1760 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1761 {
1762 const struct gen_device_info *devinfo = p->devinfo;
1763 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1764 unsigned jmpi = 1;
1765
1766 if (devinfo->gen >= 5)
1767 jmpi = 2;
1768
1769 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1770 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1771
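   /* Worked example: on Gen5 (jmpi == 2) a JMPI emitted at index 10 with
    * p->nr_insn == 14 gets jump count 2 * (14 - 10 - 1) = 6, skipping the
    * three instructions between it and the landing point.
    */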
1772 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1773 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1774 }
1775
1776 /* To integrate with the above, it makes sense that the comparison
1777 * instruction should populate the flag register. It might be simpler
1778 * just to use the flag reg for most WM tasks?
1779 */
1780 void brw_CMP(struct brw_codegen *p,
1781 struct brw_reg dest,
1782 unsigned conditional,
1783 struct brw_reg src0,
1784 struct brw_reg src1)
1785 {
1786 const struct gen_device_info *devinfo = p->devinfo;
1787 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1788
1789 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1790 brw_set_dest(p, insn, dest);
1791 brw_set_src0(p, insn, src0);
1792 brw_set_src1(p, insn, src1);
1793
1794 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1795 * page says:
1796 * "Any CMP instruction with a null destination must use a {switch}."
1797 *
1798 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1799 * mentioned on their workarounds pages.
1800 */
1801 if (devinfo->gen == 7) {
1802 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1803 dest.nr == BRW_ARF_NULL) {
1804 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1805 }
1806 }
1807 }
1808
1809 /***********************************************************************
1810 * Helpers for the various SEND message types:
1811 */
1812
1813 /** Extended math function, float[8].
1814 */
1815 void gen4_math(struct brw_codegen *p,
1816 struct brw_reg dest,
1817 unsigned function,
1818 unsigned msg_reg_nr,
1819 struct brw_reg src,
1820 unsigned precision )
1821 {
1822 const struct gen_device_info *devinfo = p->devinfo;
1823 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1824 unsigned data_type;
1825 if (has_scalar_region(src)) {
1826 data_type = BRW_MATH_DATA_SCALAR;
1827 } else {
1828 data_type = BRW_MATH_DATA_VECTOR;
1829 }
1830
1831 assert(devinfo->gen < 6);
1832
1833 /* Example code doesn't set predicate_control for send
1834 * instructions.
1835 */
1836 brw_inst_set_pred_control(devinfo, insn, 0);
1837 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1838
1839 brw_set_dest(p, insn, dest);
1840 brw_set_src0(p, insn, src);
1841 brw_set_math_message(p,
1842 insn,
1843 function,
1844 src.type == BRW_REGISTER_TYPE_D,
1845 precision,
1846 data_type);
1847 }
1848
1849 void gen6_math(struct brw_codegen *p,
1850 struct brw_reg dest,
1851 unsigned function,
1852 struct brw_reg src0,
1853 struct brw_reg src1)
1854 {
1855 const struct gen_device_info *devinfo = p->devinfo;
1856 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1857
1858 assert(devinfo->gen >= 6);
1859
1860 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1861 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1862
1863 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1864 if (devinfo->gen == 6) {
1865 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1866 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1867 }
1868
1869 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1870 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1871 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1872 assert(src0.type != BRW_REGISTER_TYPE_F);
1873 assert(src1.type != BRW_REGISTER_TYPE_F);
1874 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1875 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1876 } else {
1877 assert(src0.type == BRW_REGISTER_TYPE_F);
1878 assert(src1.type == BRW_REGISTER_TYPE_F);
1879 }
1880
1881 /* Source modifiers are ignored for extended math instructions on Gen6. */
1882 if (devinfo->gen == 6) {
1883 assert(!src0.negate);
1884 assert(!src0.abs);
1885 assert(!src1.negate);
1886 assert(!src1.abs);
1887 }
1888
1889 brw_inst_set_math_function(devinfo, insn, function);
1890
1891 brw_set_dest(p, insn, dest);
1892 brw_set_src0(p, insn, src0);
1893 brw_set_src1(p, insn, src1);
1894 }
1895
1896 /**
1897 * Return the right surface index to access the thread scratch space using
1898 * stateless dataport messages.
1899 */
1900 unsigned
1901 brw_scratch_surface_idx(const struct brw_codegen *p)
1902 {
1903 /* The scratch space is thread-local, so IA coherency is unnecessary. */
1904 if (p->devinfo->gen >= 8)
1905 return GEN8_BTI_STATELESS_NON_COHERENT;
1906 else
1907 return BRW_BTI_STATELESS;
1908 }
1909
1910 /**
1911 * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1912 * using a constant offset per channel.
1913 *
1914 * The offset must be aligned to oword size (16 bytes). Used for
1915 * register spilling.
1916 */
1917 void brw_oword_block_write_scratch(struct brw_codegen *p,
1918 struct brw_reg mrf,
1919 int num_regs,
1920 unsigned offset)
1921 {
1922 const struct gen_device_info *devinfo = p->devinfo;
1923 const unsigned target_cache =
1924 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1925 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1926 BRW_SFID_DATAPORT_WRITE);
1927 uint32_t msg_type;
1928
1929 if (devinfo->gen >= 6)
1930 offset /= 16;
1931
1932 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1933
1934 const unsigned mlen = 1 + num_regs;
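   /* For example, spilling two GRFs: mlen = 3 (the header plus two data
    * registers) and the message writes num_regs * 8 = 16 DWords, i.e. 4
    * OWords, at the OWord-granular offset computed above for gen6+.
    */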
1935
1936 /* Set up the message header. This is g0, with g0.2 filled with
1937 * the offset. We don't want to leave our offset around in g0 or
1938 * it'll screw up texture samples, so set it up inside the message
1939 * reg.
1940 */
1941 {
1942 brw_push_insn_state(p);
1943 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1944 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1945 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1946
1947 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1948
1949 /* set message header global offset field (reg 0, element 2) */
1950 brw_set_default_exec_size(p, BRW_EXECUTE_1);
1951 brw_MOV(p,
1952 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
1953 mrf.nr,
1954 2), BRW_REGISTER_TYPE_UD),
1955 brw_imm_ud(offset));
1956
1957 brw_pop_insn_state(p);
1958 }
1959
1960 {
1961 struct brw_reg dest;
1962 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1963 int send_commit_msg;
1964 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
1965 BRW_REGISTER_TYPE_UW);
1966
1967 brw_inst_set_sfid(devinfo, insn, target_cache);
1968 brw_inst_set_compression(devinfo, insn, false);
1969
1970 if (brw_inst_exec_size(devinfo, insn) >= 16)
1971 src_header = vec16(src_header);
1972
1973 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1974 if (devinfo->gen < 6)
1975 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
1976
1977 /* Until gen6, writes followed by reads from the same location
1978 * are not guaranteed to be ordered unless write_commit is set.
1979 * If set, then a no-op write is issued to the destination
1980 * register to set a dependency, and a read from the destination
1981 * can be used to ensure the ordering.
1982 *
1983 * For gen6, only writes between different threads need ordering
1984 * protection. Our use of DP writes is all about register
1985 * spilling within a thread.
1986 */
1987 if (devinfo->gen >= 6) {
1988 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
1989 send_commit_msg = 0;
1990 } else {
1991 dest = src_header;
1992 send_commit_msg = 1;
1993 }
1994
1995 brw_set_dest(p, insn, dest);
1996 if (devinfo->gen >= 6) {
1997 brw_set_src0(p, insn, mrf);
1998 } else {
1999 brw_set_src0(p, insn, brw_null_reg());
2000 }
2001
2002 if (devinfo->gen >= 6)
2003 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2004 else
2005 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2006
2007 brw_set_desc(p, insn,
2008 brw_message_desc(devinfo, mlen, send_commit_msg, true) |
2009 brw_dp_write_desc(devinfo, brw_scratch_surface_idx(p),
2010 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2011 msg_type, 0, /* not a render target */
2012 send_commit_msg));
2013 }
2014 }
2015
2016
2017 /**
2018 * Read a block of OWORDs (half a GRF each) from the scratch buffer,
2019 * using a constant offset per channel.
2020 *
2021 * Offset must be aligned to oword size (16 bytes). Used for register
2022 * spilling.
2023 */
2024 void
2025 brw_oword_block_read_scratch(struct brw_codegen *p,
2026 struct brw_reg dest,
2027 struct brw_reg mrf,
2028 int num_regs,
2029 unsigned offset)
2030 {
2031 const struct gen_device_info *devinfo = p->devinfo;
2032
2033 if (devinfo->gen >= 6)
2034 offset /= 16;
2035
2036 if (p->devinfo->gen >= 7) {
2037 /* On gen7 and above, we no longer have message registers and we can
2038 * send from any register we want. By using the destination register
2039 * for the message, we guarantee that the implied message write won't
2040 * accidentally overwrite anything. This has been a problem because
2041 * the MRF registers and source for the final FB write are both fixed
2042 * and may overlap.
2043 */
2044 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2045 } else {
2046 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2047 }
2048 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2049
2050 const unsigned rlen = num_regs;
2051 const unsigned target_cache =
2052 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2053 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2054 BRW_SFID_DATAPORT_READ);
2055
2056 {
2057 brw_push_insn_state(p);
2058 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2059 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2060 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2061
2062 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2063
2064 /* set message header global offset field (reg 0, element 2) */
2065 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2066 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2067
2068 brw_pop_insn_state(p);
2069 }
2070
2071 {
2072 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2073
2074 brw_inst_set_sfid(devinfo, insn, target_cache);
2075 assert(brw_inst_pred_control(devinfo, insn) == 0);
2076 brw_inst_set_compression(devinfo, insn, false);
2077
2078 brw_set_dest(p, insn, dest); /* UW? */
2079 if (devinfo->gen >= 6) {
2080 brw_set_src0(p, insn, mrf);
2081 } else {
2082 brw_set_src0(p, insn, brw_null_reg());
2083 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2084 }
2085
2086 brw_set_desc(p, insn,
2087 brw_message_desc(devinfo, 1, rlen, true) |
2088 brw_dp_read_desc(devinfo, brw_scratch_surface_idx(p),
2089 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2090 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2091 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2092 }
2093 }
2094
2095 void
2096 gen7_block_read_scratch(struct brw_codegen *p,
2097 struct brw_reg dest,
2098 int num_regs,
2099 unsigned offset)
2100 {
2101 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2102 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2103
2104 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2105
2106 /* The HW requires that the header is present; this is to get the g0.5
2107 * scratch offset.
2108 */
2109 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2110
2111 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2112 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2113 * is 32 bytes, which happens to be the size of a register.
2114 */
2115 offset /= REG_SIZE;
2116 assert(offset < (1 << 12));
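   /* e.g. a byte offset of 4096 becomes HWord offset 4096 / 32 = 128. */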
2117
2118 gen7_set_dp_scratch_message(p, insn,
2119 false, /* scratch read */
2120 false, /* OWords */
2121 false, /* invalidate after read */
2122 num_regs,
2123 offset,
2124 1, /* mlen: just g0 */
2125 num_regs, /* rlen */
2126 true); /* header present */
2127 }
2128
2129 /**
2130 * Read float[4] vectors from the data port constant cache.
2131 * Location (in buffer) should be a multiple of 16.
2132 * Used for fetching shader constants.
2133 */
2134 void brw_oword_block_read(struct brw_codegen *p,
2135 struct brw_reg dest,
2136 struct brw_reg mrf,
2137 uint32_t offset,
2138 uint32_t bind_table_index)
2139 {
2140 const struct gen_device_info *devinfo = p->devinfo;
2141 const unsigned target_cache =
2142 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2143 BRW_SFID_DATAPORT_READ);
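   /* Note that brw_get_default_exec_size() returns a log2 encoding
    * (BRW_EXECUTE_8 == 3), so the shift below recovers the channel count.
    */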
2144 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2145
2146 /* On newer hardware, offset is in units of owords. */
2147 if (devinfo->gen >= 6)
2148 offset /= 16;
2149
2150 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2151
2152 brw_push_insn_state(p);
2153 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2154 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2155 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2156
2157 brw_push_insn_state(p);
2158 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2159 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2160
2161 /* set message header global offset field (reg 0, element 2) */
2162 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2163 brw_MOV(p,
2164 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2165 mrf.nr,
2166 2), BRW_REGISTER_TYPE_UD),
2167 brw_imm_ud(offset));
2168 brw_pop_insn_state(p);
2169
2170 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2171
2172 brw_inst_set_sfid(devinfo, insn, target_cache);
2173
2174 /* cast dest to a uword[8] vector */
2175 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2176
2177 brw_set_dest(p, insn, dest);
2178 if (devinfo->gen >= 6) {
2179 brw_set_src0(p, insn, mrf);
2180 } else {
2181 brw_set_src0(p, insn, brw_null_reg());
2182 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2183 }
2184
2185 brw_set_desc(p, insn,
2186 brw_message_desc(devinfo, 1, DIV_ROUND_UP(exec_size, 8), true) |
2187 brw_dp_read_desc(devinfo, bind_table_index,
2188 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2189 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2190 BRW_DATAPORT_READ_TARGET_DATA_CACHE));
2191
2192 brw_pop_insn_state(p);
2193 }
2194
2195 brw_inst *
2196 brw_fb_WRITE(struct brw_codegen *p,
2197 struct brw_reg payload,
2198 struct brw_reg implied_header,
2199 unsigned msg_control,
2200 unsigned binding_table_index,
2201 unsigned msg_length,
2202 unsigned response_length,
2203 bool eot,
2204 bool last_render_target,
2205 bool header_present)
2206 {
2207 const struct gen_device_info *devinfo = p->devinfo;
2208 const unsigned target_cache =
2209 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2210 BRW_SFID_DATAPORT_WRITE);
2211 brw_inst *insn;
2212 unsigned msg_type;
2213 struct brw_reg dest, src0;
2214
2215 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2216 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2217 else
2218 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2219
2220 if (devinfo->gen >= 6) {
2221 insn = next_insn(p, BRW_OPCODE_SENDC);
2222 } else {
2223 insn = next_insn(p, BRW_OPCODE_SEND);
2224 }
2225 brw_inst_set_sfid(devinfo, insn, target_cache);
2226 brw_inst_set_compression(devinfo, insn, false);
2227
2228 if (devinfo->gen >= 6) {
2229 /* headerless version, just submit color payload */
2230 src0 = payload;
2231
2232 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2233 } else {
2234 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2235 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2236 src0 = implied_header;
2237
2238 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2239 }
2240
2241 brw_set_dest(p, insn, dest);
2242 brw_set_src0(p, insn, src0);
2243 brw_set_desc(p, insn,
2244 brw_message_desc(devinfo, msg_length, response_length,
2245 header_present) |
2246 brw_dp_write_desc(devinfo, binding_table_index, msg_control,
2247 msg_type, last_render_target,
2248 0 /* send_commit_msg */));
2249 brw_inst_set_eot(devinfo, insn, eot);
2250
2251 return insn;
2252 }
2253
2254 brw_inst *
2255 gen9_fb_READ(struct brw_codegen *p,
2256 struct brw_reg dst,
2257 struct brw_reg payload,
2258 unsigned binding_table_index,
2259 unsigned msg_length,
2260 unsigned response_length,
2261 bool per_sample)
2262 {
2263 const struct gen_device_info *devinfo = p->devinfo;
2264 assert(devinfo->gen >= 9);
2265 const unsigned msg_subtype =
2266 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2267 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2268
2269 brw_inst_set_sfid(devinfo, insn, GEN6_SFID_DATAPORT_RENDER_CACHE);
2270 brw_set_dest(p, insn, dst);
2271 brw_set_src0(p, insn, payload);
2272 brw_set_desc(
2273 p, insn,
2274 brw_message_desc(devinfo, msg_length, response_length, true) |
2275 brw_dp_read_desc(devinfo, binding_table_index,
2276 per_sample << 5 | msg_subtype,
2277 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2278 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2279 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2280
2281 return insn;
2282 }
2283
2284 /**
2285 * Texture sample instruction.
2286 * Note: the msg_type plus msg_length values determine exactly what kind
2287 * of sampling operation is performed. See volume 4, page 161 of docs.
2288 */
2289 void brw_SAMPLE(struct brw_codegen *p,
2290 struct brw_reg dest,
2291 unsigned msg_reg_nr,
2292 struct brw_reg src0,
2293 unsigned binding_table_index,
2294 unsigned sampler,
2295 unsigned msg_type,
2296 unsigned response_length,
2297 unsigned msg_length,
2298 unsigned header_present,
2299 unsigned simd_mode,
2300 unsigned return_format)
2301 {
2302 const struct gen_device_info *devinfo = p->devinfo;
2303 brw_inst *insn;
2304
2305 if (msg_reg_nr != -1)
2306 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2307
2308 insn = next_insn(p, BRW_OPCODE_SEND);
2309 brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
2310 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2311
2312 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2313 *
2314 * "Instruction compression is not allowed for this instruction (that
2315 * is, send). The hardware behavior is undefined if this instruction is
2316 * set as compressed. However, compress control can be set to "SecHalf"
2317 * to affect the EMask generation."
2318 *
2319 * No similar wording is found in later PRMs, but there are examples
2320 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2321 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2322 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2323 */
2324 brw_inst_set_compression(devinfo, insn, false);
2325
2326 if (devinfo->gen < 6)
2327 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2328
2329 brw_set_dest(p, insn, dest);
2330 brw_set_src0(p, insn, src0);
2331 brw_set_desc(p, insn,
2332 brw_message_desc(devinfo, msg_length, response_length,
2333 header_present) |
2334 brw_sampler_desc(devinfo, binding_table_index, sampler,
2335 msg_type, simd_mode, return_format));
2336 }
2337
2338 /* Adjust the message header's sampler state pointer to
2339 * select the correct group of 16 samplers.
2340 */
2341 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2342 struct brw_reg header,
2343 struct brw_reg sampler_index)
2344 {
2345 /* The "Sampler Index" field can only store values between 0 and 15.
2346 * However, we can add an offset to the "Sampler State Pointer"
2347 * field, effectively selecting a different set of 16 samplers.
2348 *
2349 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2350 * offset, and each sampler state is only 16 bytes, so we can't
2351 * use the offset alone; we have to use both fields.
2352 */
2353
2354 const struct gen_device_info *devinfo = p->devinfo;
2355
2356 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2357 const int sampler_state_size = 16; /* 16 bytes */
2358 uint32_t sampler = sampler_index.ud;
2359
2360 if (sampler >= 16) {
2361 assert(devinfo->is_haswell || devinfo->gen >= 8);
2362 brw_ADD(p,
2363 get_element_ud(header, 3),
2364 get_element_ud(brw_vec8_grf(0, 0), 3),
2365 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
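         /* e.g. sampler 20 adds 16 * (20 / 16) * 16 = 256 bytes, pointing
          * the state pointer at the second group of 16 samplers (each
          * group spans 16 * 16 = 256 bytes).
          */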
2366 }
2367 } else {
2368 /* Non-const sampler array indexing case */
2369 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2370 return;
2371 }
2372
2373 struct brw_reg temp = get_element_ud(header, 3);
2374
2375 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2376 brw_SHL(p, temp, temp, brw_imm_ud(4));
2377 brw_ADD(p,
2378 get_element_ud(header, 3),
2379 get_element_ud(brw_vec8_grf(0, 0), 3),
2380 temp);
2381 }
2382 }
2383
2384 /* All these variables are pretty confusing; we might be better off
2385 * using bitmasks and macros for this, in the old style. Or perhaps
2386 * just having the caller instantiate the fields in dword3 itself.
2387 */
2388 void brw_urb_WRITE(struct brw_codegen *p,
2389 struct brw_reg dest,
2390 unsigned msg_reg_nr,
2391 struct brw_reg src0,
2392 enum brw_urb_write_flags flags,
2393 unsigned msg_length,
2394 unsigned response_length,
2395 unsigned offset,
2396 unsigned swizzle)
2397 {
2398 const struct gen_device_info *devinfo = p->devinfo;
2399 brw_inst *insn;
2400
2401 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2402
2403 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2404 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2405 brw_push_insn_state(p);
2406 brw_set_default_access_mode(p, BRW_ALIGN_1);
2407 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2408 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2409 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2410 BRW_REGISTER_TYPE_UD),
2411 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2412 brw_imm_ud(0xff00));
2413 brw_pop_insn_state(p);
2414 }
2415
2416 insn = next_insn(p, BRW_OPCODE_SEND);
2417
2418 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2419
2420 brw_set_dest(p, insn, dest);
2421 brw_set_src0(p, insn, src0);
2422 brw_set_src1(p, insn, brw_imm_d(0));
2423
2424 if (devinfo->gen < 6)
2425 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2426
2427 brw_set_urb_message(p,
2428 insn,
2429 flags,
2430 msg_length,
2431 response_length,
2432 offset,
2433 swizzle);
2434 }
2435
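/* Emit a SEND with a possibly indirect message descriptor.  As a usage
 * sketch: pass brw_imm_ud(...) as desc to get a plain immediate-descriptor
 * SEND, or a UD GRF to have the descriptor ORed with desc_imm into a0.0
 * and sourced from the address register.
 */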
2436 void
2437 brw_send_indirect_message(struct brw_codegen *p,
2438 unsigned sfid,
2439 struct brw_reg dst,
2440 struct brw_reg payload,
2441 struct brw_reg desc,
2442 unsigned desc_imm)
2443 {
2444 const struct gen_device_info *devinfo = p->devinfo;
2445 struct brw_inst *send;
2446
2447 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2448
2449 assert(desc.type == BRW_REGISTER_TYPE_UD);
2450
2451 if (desc.file == BRW_IMMEDIATE_VALUE) {
2452 send = next_insn(p, BRW_OPCODE_SEND);
2453 brw_set_desc(p, send, desc.ud | desc_imm);
2454
2455 } else {
2456 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2457
2458 brw_push_insn_state(p);
2459 brw_set_default_access_mode(p, BRW_ALIGN_1);
2460 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2461 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2462 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2463
2464 /* Load the indirect descriptor to an address register using OR so the
2465 * caller can specify additional descriptor bits with the desc_imm
2466 * immediate.
2467 */
2468 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2469
2470 brw_pop_insn_state(p);
2471
2472 send = next_insn(p, BRW_OPCODE_SEND);
2473 brw_set_src1(p, send, addr);
2474 }
2475
2476 brw_set_dest(p, send, dst);
2477 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2478 brw_inst_set_sfid(devinfo, send, sfid);
2479 }
2480
2481 static void
2482 brw_send_indirect_surface_message(struct brw_codegen *p,
2483 unsigned sfid,
2484 struct brw_reg dst,
2485 struct brw_reg payload,
2486 struct brw_reg surface,
2487 unsigned desc_imm)
2488 {
2489 if (surface.file != BRW_IMMEDIATE_VALUE) {
2490 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2491
2492 brw_push_insn_state(p);
2493 brw_set_default_access_mode(p, BRW_ALIGN_1);
2494 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2495 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2496 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2497
2498 * Mask out invalid bits from the surface index to avoid hangs, e.g. when
2499 * some surface array is accessed out of bounds.
2500 */
2501 brw_AND(p, addr,
2502 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2503 BRW_GET_SWZ(surface.swizzle, 0)),
2504 brw_imm_ud(0xff));
2505
2506 brw_pop_insn_state(p);
2507
2508 surface = addr;
2509 }
2510
2511 brw_send_indirect_message(p, sfid, dst, payload, surface, desc_imm);
2512 }
2513
2514 static bool
2515 while_jumps_before_offset(const struct gen_device_info *devinfo,
2516 brw_inst *insn, int while_offset, int start_offset)
2517 {
2518 int scale = 16 / brw_jump_scale(devinfo);
2519 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2520 : brw_inst_jip(devinfo, insn);
2521 assert(jip < 0);
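   /* e.g. on Gen7 (scale == 8) a WHILE at byte offset 160 with jip == -16
    * jumps to 160 + (-16 * 8) = 32, so it jumps before any start_offset of
    * 32 or more.
    */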
2522 return while_offset + jip * scale <= start_offset;
2523 }
2524
2525
2526 static int
2527 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2528 {
2529 int offset;
2530 void *store = p->store;
2531 const struct gen_device_info *devinfo = p->devinfo;
2532
2533 int depth = 0;
2534
2535 for (offset = next_offset(devinfo, store, start_offset);
2536 offset < p->next_insn_offset;
2537 offset = next_offset(devinfo, store, offset)) {
2538 brw_inst *insn = store + offset;
2539
2540 switch (brw_inst_opcode(devinfo, insn)) {
2541 case BRW_OPCODE_IF:
2542 depth++;
2543 break;
2544 case BRW_OPCODE_ENDIF:
2545 if (depth == 0)
2546 return offset;
2547 depth--;
2548 break;
2549 case BRW_OPCODE_WHILE:
2550 /* If the while doesn't jump before our instruction, it's the end
2551 * of a sibling do...while loop. Ignore it.
2552 */
2553 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2554 continue;
2555 /* fallthrough */
2556 case BRW_OPCODE_ELSE:
2557 case BRW_OPCODE_HALT:
2558 if (depth == 0)
2559 return offset;
2560 }
2561 }
2562
2563 return 0;
2564 }
2565
2566 /* There is no DO instruction on gen6, so to find the end of the loop
2567 * we have to see if the loop is jumping back before our start
2568 * instruction.
2569 */
2570 static int
2571 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2572 {
2573 const struct gen_device_info *devinfo = p->devinfo;
2574 int offset;
2575 void *store = p->store;
2576
2577 assert(devinfo->gen >= 6);
2578
2579 /* Always start after the instruction (such as a WHILE) we're trying to fix
2580 * up.
2581 */
2582 for (offset = next_offset(devinfo, store, start_offset);
2583 offset < p->next_insn_offset;
2584 offset = next_offset(devinfo, store, offset)) {
2585 brw_inst *insn = store + offset;
2586
2587 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2588 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2589 return offset;
2590 }
2591 }
2592 assert(!"not reached");
2593 return start_offset;
2594 }
2595
2596 /* After program generation, go back and update the UIP and JIP of
2597 * BREAK, CONT, and HALT instructions to their correct locations.
2598 */
2599 void
2600 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2601 {
2602 const struct gen_device_info *devinfo = p->devinfo;
2603 int offset;
2604 int br = brw_jump_scale(devinfo);
2605 int scale = 16 / br;
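   /* On Gen6/7 br is 2, so scale is 8 and JIP/UIP count 64-bit chunks; on
    * Gen8+ br is 16, so scale is 1 and they count bytes directly.
    */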
2606 void *store = p->store;
2607
2608 if (devinfo->gen < 6)
2609 return;
2610
2611 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2612 brw_inst *insn = store + offset;
2613 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2614
2615 int block_end_offset = brw_find_next_block_end(p, offset);
2616 switch (brw_inst_opcode(devinfo, insn)) {
2617 case BRW_OPCODE_BREAK:
2618 assert(block_end_offset != 0);
2619 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2620 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2621 brw_inst_set_uip(devinfo, insn,
2622 (brw_find_loop_end(p, offset) - offset +
2623 (devinfo->gen == 6 ? 16 : 0)) / scale);
2624 break;
2625 case BRW_OPCODE_CONTINUE:
2626 assert(block_end_offset != 0);
2627 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2628 brw_inst_set_uip(devinfo, insn,
2629 (brw_find_loop_end(p, offset) - offset) / scale);
2630
2631 assert(brw_inst_uip(devinfo, insn) != 0);
2632 assert(brw_inst_jip(devinfo, insn) != 0);
2633 break;
2634
2635 case BRW_OPCODE_ENDIF: {
2636 int32_t jump = (block_end_offset == 0) ?
2637 1 * br : (block_end_offset - offset) / scale;
2638 if (devinfo->gen >= 7)
2639 brw_inst_set_jip(devinfo, insn, jump);
2640 else
2641 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2642 break;
2643 }
2644
2645 case BRW_OPCODE_HALT:
2646 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2647 *
2648 * "In case of the halt instruction not inside any conditional
2649 * code block, the value of <JIP> and <UIP> should be the
2650 * same. In case of the halt instruction inside conditional code
2651 * block, the <UIP> should be the end of the program, and the
2652 * <JIP> should be end of the most inner conditional code block."
2653 *
2654 * The uip will have already been set by whoever set up the
2655 * instruction.
2656 */
2657 if (block_end_offset == 0) {
2658 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2659 } else {
2660 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2661 }
2662 assert(brw_inst_uip(devinfo, insn) != 0);
2663 assert(brw_inst_jip(devinfo, insn) != 0);
2664 break;
2665 }
2666 }
2667 }
2668
2669 void brw_ff_sync(struct brw_codegen *p,
2670 struct brw_reg dest,
2671 unsigned msg_reg_nr,
2672 struct brw_reg src0,
2673 bool allocate,
2674 unsigned response_length,
2675 bool eot)
2676 {
2677 const struct gen_device_info *devinfo = p->devinfo;
2678 brw_inst *insn;
2679
2680 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2681
2682 insn = next_insn(p, BRW_OPCODE_SEND);
2683 brw_set_dest(p, insn, dest);
2684 brw_set_src0(p, insn, src0);
2685 brw_set_src1(p, insn, brw_imm_d(0));
2686
2687 if (devinfo->gen < 6)
2688 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2689
2690 brw_set_ff_sync_message(p,
2691 insn,
2692 allocate,
2693 response_length,
2694 eot);
2695 }
2696
2697 /**
2698 * Emit the SEND instruction necessary to generate stream output data on Gen6
2699 * (for transform feedback).
2700 *
2701 * If send_commit_msg is true, this is the last piece of stream output data
2702 * from this thread, so send the data as a committed write. According to the
2703 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2704 *
2705 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2706 * writes are complete by sending the final write as a committed write."
2707 */
2708 void
2709 brw_svb_write(struct brw_codegen *p,
2710 struct brw_reg dest,
2711 unsigned msg_reg_nr,
2712 struct brw_reg src0,
2713 unsigned binding_table_index,
2714 bool send_commit_msg)
2715 {
2716 const struct gen_device_info *devinfo = p->devinfo;
2717 const unsigned target_cache =
2718 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2719 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2720 BRW_SFID_DATAPORT_WRITE);
2721 brw_inst *insn;
2722
2723 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2724
2725 insn = next_insn(p, BRW_OPCODE_SEND);
2726 brw_inst_set_sfid(devinfo, insn, target_cache);
2727 brw_set_dest(p, insn, dest);
2728 brw_set_src0(p, insn, src0);
2729 brw_set_desc(p, insn,
2730 brw_message_desc(devinfo, 1, send_commit_msg, true) |
2731 brw_dp_write_desc(devinfo, binding_table_index,
2732 0, /* msg_control: ignored */
2733 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2734 0, /* last_render_target: ignored */
2735 send_commit_msg)); /* send_commit_msg */
2736 }
2737
2738 static unsigned
2739 brw_surface_payload_size(struct brw_codegen *p,
2740 unsigned num_channels,
2741 bool has_simd4x2,
2742 bool has_simd16)
2743 {
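   /* e.g. a SIMD16 access returning four channels takes 2 * 4 = 8 GRFs,
    * while an Align16 SIMD4x2 access packs its payload into a single GRF.
    */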
2744 if (has_simd4x2 && brw_get_default_access_mode(p) == BRW_ALIGN_16)
2745 return 1;
2746 else if (has_simd16 && brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2747 return 2 * num_channels;
2748 else
2749 return num_channels;
2750 }
2751
2752 static uint32_t
2753 brw_dp_untyped_atomic_desc(struct brw_codegen *p,
2754 unsigned atomic_op,
2755 bool response_expected)
2756 {
2757 const struct gen_device_info *devinfo = p->devinfo;
2758 unsigned msg_control =
2759 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2760 (response_expected ? 1 << 5 : 0); /* Return data expected */
2761 unsigned msg_type;
2762
2763 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2764 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2765 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2766 msg_control |= 1 << 4; /* SIMD8 mode */
2767
2768 msg_type = HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP;
2769 } else {
2770 msg_type = HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2;
2771 }
2772 } else {
2773 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2774 msg_control |= 1 << 4; /* SIMD8 mode */
2775
2776 msg_type = GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP;
2777 }
2778
2779 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2780 }
2781
2782 void
2783 brw_untyped_atomic(struct brw_codegen *p,
2784 struct brw_reg dst,
2785 struct brw_reg payload,
2786 struct brw_reg surface,
2787 unsigned atomic_op,
2788 unsigned msg_length,
2789 bool response_expected,
2790 bool header_present)
2791 {
2792 const struct gen_device_info *devinfo = p->devinfo;
2793 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2794 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2795 GEN7_SFID_DATAPORT_DATA_CACHE);
2796 const unsigned response_length = brw_surface_payload_size(
2797 p, response_expected, devinfo->gen >= 8 || devinfo->is_haswell, true);
2798 const unsigned desc =
2799 brw_message_desc(devinfo, msg_length, response_length, header_present) |
2800 brw_dp_untyped_atomic_desc(p, atomic_op, response_expected);
2801 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2802 /* Mask out unused components -- This is especially important in Align16
2803 * mode on generations that don't have native support for SIMD4x2 atomics,
2804 * because unused but enabled components will cause the dataport to perform
2805 * additional atomic operations on the addresses that happen to be in the
2806 * uninitialized Y, Z and W coordinates of the payload.
2807 */
2808 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2809
2810 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
2811 payload, surface, desc);
2812 }
2813
2814 static uint32_t
2815 brw_dp_untyped_atomic_float_desc(struct brw_codegen *p,
2816 unsigned atomic_op,
2817 bool response_expected)
2818 {
2819 const struct gen_device_info *devinfo = p->devinfo;
2820 const unsigned msg_type = GEN9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP;
2821 unsigned msg_control =
2822 atomic_op | /* Atomic Operation Type: BRW_AOP_F* */
2823 (response_expected ? 1 << 5 : 0); /* Return data expected */
2824
2825 assert(devinfo->gen >= 9);
2826 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2827
2828 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2829 msg_control |= 1 << 4; /* SIMD8 mode */
2830
2831 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2832 }
2833
2834 void
2835 brw_untyped_atomic_float(struct brw_codegen *p,
2836 struct brw_reg dst,
2837 struct brw_reg payload,
2838 struct brw_reg surface,
2839 unsigned atomic_op,
2840 unsigned msg_length,
2841 bool response_expected,
2842 bool header_present)
2843 {
2844 const struct gen_device_info *devinfo = p->devinfo;
2845
2846 assert(devinfo->gen >= 9);
2847 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2848
2849 const unsigned sfid = HSW_SFID_DATAPORT_DATA_CACHE_1;
2850 const unsigned response_length = brw_surface_payload_size(
2851 p, response_expected, true, true);
2852 const unsigned desc =
2853 brw_message_desc(devinfo, msg_length, response_length, header_present) |
2854 brw_dp_untyped_atomic_float_desc(p, atomic_op, response_expected);
2855
2856 brw_send_indirect_surface_message(p, sfid,
2857 brw_writemask(dst, WRITEMASK_XYZW),
2858 payload, surface, desc);
2859 }
2860
2861 static uint32_t
2862 brw_dp_untyped_surface_read_desc(struct brw_codegen *p,
2863 unsigned num_channels)
2864 {
2865 const struct gen_device_info *devinfo = p->devinfo;
2866 const unsigned msg_type = (devinfo->gen >= 8 || devinfo->is_haswell ?
2867 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2868 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ);
2869 /* Set mask of 32-bit channels to drop. */
2870 unsigned msg_control = 0xf & (0xf << num_channels);
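   /* e.g. num_channels == 2 gives 0xf & (0xf << 2) = 0xc, dropping the Z
    * and W channels and returning only X and Y.
    */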
2871
2872 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2873 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2874 msg_control |= 1 << 4; /* SIMD16 mode */
2875 else
2876 msg_control |= 2 << 4; /* SIMD8 mode */
2877 }
2878
2879 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2880 }
2881
2882 void
2883 brw_untyped_surface_read(struct brw_codegen *p,
2884 struct brw_reg dst,
2885 struct brw_reg payload,
2886 struct brw_reg surface,
2887 unsigned msg_length,
2888 unsigned num_channels)
2889 {
2890 const struct gen_device_info *devinfo = p->devinfo;
2891 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2892 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2893 GEN7_SFID_DATAPORT_DATA_CACHE);
2894 const unsigned response_length =
2895 brw_surface_payload_size(p, num_channels, true, true);
2896 const unsigned desc =
2897 brw_message_desc(devinfo, msg_length, response_length, false) |
2898 brw_dp_untyped_surface_read_desc(p, num_channels);
2899
2900 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
2901 }
2902
2903 static uint32_t
2904 brw_dp_untyped_surface_write_desc(struct brw_codegen *p,
2905 unsigned num_channels)
2906 {
2907 const struct gen_device_info *devinfo = p->devinfo;
2908 const unsigned msg_type = (devinfo->gen >= 8 || devinfo->is_haswell ?
2909 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
2910 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
2911 /* Set mask of 32-bit channels to drop. */
2912 unsigned msg_control = 0xf & (0xf << num_channels);
2913
2914 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2915 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2916 msg_control |= 1 << 4; /* SIMD16 mode */
2917 else
2918 msg_control |= 2 << 4; /* SIMD8 mode */
2919 } else {
2920 if (devinfo->gen >= 8 || devinfo->is_haswell)
2921 msg_control |= 0 << 4; /* SIMD4x2 mode */
2922 else
2923 msg_control |= 2 << 4; /* SIMD8 mode */
2924 }
2925
2926 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2927 }
2928
2929 void
2930 brw_untyped_surface_write(struct brw_codegen *p,
2931 struct brw_reg payload,
2932 struct brw_reg surface,
2933 unsigned msg_length,
2934 unsigned num_channels,
2935 bool header_present)
2936 {
2937 const struct gen_device_info *devinfo = p->devinfo;
2938 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2939 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2940 GEN7_SFID_DATAPORT_DATA_CACHE);
2941 const unsigned desc =
2942 brw_message_desc(devinfo, msg_length, 0, header_present) |
2943 brw_dp_untyped_surface_write_desc(p, num_channels);
2944 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2945 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2946 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
2947 WRITEMASK_X : WRITEMASK_XYZW;
2948
2949 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
2950 payload, surface, desc);
2951 }
2952
2953 static unsigned
2954 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
2955 {
2956 switch (bit_size) {
2957 case 8:
2958 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
2959 case 16:
2960 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
2961 case 32:
2962 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
2963 default:
2964 unreachable("Unsupported bit_size for byte scattered messages");
2965 }
2966 }
2967
2968 static uint32_t
2969 brw_dp_byte_scattered_desc(struct brw_codegen *p, unsigned bit_size,
2970 unsigned msg_type)
2971 {
2972 const struct gen_device_info *devinfo = p->devinfo;
2973 unsigned msg_control =
2974 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
2975
2976 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2977 msg_control |= 1; /* SIMD16 mode */
2978 else
2979 msg_control |= 0; /* SIMD8 mode */
2980
2981 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2982 }
2983
2984 void
2985 brw_byte_scattered_read(struct brw_codegen *p,
2986 struct brw_reg dst,
2987 struct brw_reg payload,
2988 struct brw_reg surface,
2989 unsigned msg_length,
2990 unsigned bit_size)
2991 {
2992 const struct gen_device_info *devinfo = p->devinfo;
2993 assert(devinfo->gen > 7 || devinfo->is_haswell);
2994 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2995 const unsigned response_length =
2996 brw_surface_payload_size(p, 1, true, true);
2997 const unsigned desc =
2998 brw_message_desc(devinfo, msg_length, response_length, false) |
2999 brw_dp_byte_scattered_desc(p, bit_size,
3000 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
3001
3002 brw_send_indirect_surface_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
3003 dst, payload, surface, desc);
3004 }
3005
3006 void
3007 brw_byte_scattered_write(struct brw_codegen *p,
3008 struct brw_reg payload,
3009 struct brw_reg surface,
3010 unsigned msg_length,
3011 unsigned bit_size,
3012 bool header_present)
3013 {
3014 const struct gen_device_info *devinfo = p->devinfo;
3015 assert(devinfo->gen > 7 || devinfo->is_haswell);
3016 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3017 const unsigned desc =
3018 brw_message_desc(devinfo, msg_length, 0, header_present) |
3019 brw_dp_byte_scattered_desc(p, bit_size,
3020 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
3021
3022 brw_send_indirect_surface_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
3023 brw_writemask(brw_null_reg(),
3024 WRITEMASK_XYZW),
3025 payload, surface, desc);
3026 }
3027
3028 static uint32_t
3029 brw_dp_typed_atomic_desc(struct brw_codegen *p,
3030 unsigned atomic_op,
3031 bool response_expected)
3032 {
3033 const struct gen_device_info *devinfo = p->devinfo;
3034 unsigned msg_control =
3035 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3036 (response_expected ? 1 << 5 : 0); /* Return data expected */
3037 unsigned msg_type;
3038
3039 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3040 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3041 if ((brw_get_default_group(p) / 8) % 2 == 1)
3042 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3043
3044 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP;
3045 } else {
3046 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2;
3047 }
3048
3049 } else {
3050 if ((brw_get_default_group(p) / 8) % 2 == 1)
3051 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3052
3053 msg_type = GEN7_DATAPORT_RC_TYPED_ATOMIC_OP;
3054 }
3055
3056 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3057 }
3058
3059 void
3060 brw_typed_atomic(struct brw_codegen *p,
3061 struct brw_reg dst,
3062 struct brw_reg payload,
3063 struct brw_reg surface,
3064 unsigned atomic_op,
3065 unsigned msg_length,
3066 bool response_expected,
3067 bool header_present) {
3068 const struct gen_device_info *devinfo = p->devinfo;
3069 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3070 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3071 GEN6_SFID_DATAPORT_RENDER_CACHE);
3072 const unsigned response_length = brw_surface_payload_size(
3073 p, response_expected, devinfo->gen >= 8 || devinfo->is_haswell, false);
3074 const unsigned desc =
3075 brw_message_desc(devinfo, msg_length, response_length, header_present) |
3076 brw_dp_typed_atomic_desc(p, atomic_op, response_expected);
3077 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3078 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3079 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3080
3081 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
3082 payload, surface, desc);
3083 }
3084
3085 static uint32_t
3086 brw_dp_typed_surface_read_desc(struct brw_codegen *p,
3087 unsigned num_channels)
3088 {
3089 const struct gen_device_info *devinfo = p->devinfo;
3090 /* Set mask of unused channels. */
3091 unsigned msg_control = 0xf & (0xf << num_channels);
3092 unsigned msg_type;
3093
3094 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3095 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3096 if ((brw_get_default_group(p) / 8) % 2 == 1)
3097 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3098 else
3099 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3100 }
3101
3102 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ;
3103 } else {
3104 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3105 if ((brw_get_default_group(p) / 8) % 2 == 1)
3106 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3107 }
3108
3109 msg_type = GEN7_DATAPORT_RC_TYPED_SURFACE_READ;
3110 }
3111
3112 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3113 }
3114
3115 void
3116 brw_typed_surface_read(struct brw_codegen *p,
3117 struct brw_reg dst,
3118 struct brw_reg payload,
3119 struct brw_reg surface,
3120 unsigned msg_length,
3121 unsigned num_channels,
3122 bool header_present)
3123 {
3124 const struct gen_device_info *devinfo = p->devinfo;
3125 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3126 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3127 GEN6_SFID_DATAPORT_RENDER_CACHE);
3128 const unsigned response_length = brw_surface_payload_size(
3129 p, num_channels, devinfo->gen >= 8 || devinfo->is_haswell, false);
3130 const unsigned desc =
3131 brw_message_desc(devinfo, msg_length, response_length, header_present) |
3132 brw_dp_typed_surface_read_desc(p, num_channels);
3133
3134 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
3135 }
3136
3137 static uint32_t
3138 brw_dp_typed_surface_write_desc(struct brw_codegen *p,
3139 unsigned num_channels)
3140 {
3141 const struct gen_device_info *devinfo = p->devinfo;
3142 /* Set mask of unused channels. */
3143 unsigned msg_control = 0xf & (0xf << num_channels);
3144 unsigned msg_type;
3145
3146 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3147 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3148 if ((brw_get_default_group(p) / 8) % 2 == 1)
3149 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3150 else
3151 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3152 }
3153
3154 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE;
3155
3156 } else {
3157 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3158 if ((brw_get_default_group(p) / 8) % 2 == 1)
3159 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3160 }
3161
3162 msg_type = GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE;
3163 }
3164
3165 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3166 }
3167
3168 void
3169 brw_typed_surface_write(struct brw_codegen *p,
3170 struct brw_reg payload,
3171 struct brw_reg surface,
3172 unsigned msg_length,
3173 unsigned num_channels,
3174 bool header_present)
3175 {
3176 const struct gen_device_info *devinfo = p->devinfo;
3177 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3178 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3179 GEN6_SFID_DATAPORT_RENDER_CACHE);
3180 const unsigned desc =
3181 brw_message_desc(devinfo, msg_length, 0, header_present) |
3182 brw_dp_typed_surface_write_desc(p, num_channels);
3183 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3184 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3185 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3186 WRITEMASK_X : WRITEMASK_XYZW);
3187
3188 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
3189 payload, surface, desc);
3190 }
3191
3192 static void
3193 brw_set_memory_fence_message(struct brw_codegen *p,
3194 struct brw_inst *insn,
3195 enum brw_message_target sfid,
3196 bool commit_enable)
3197 {
3198 const struct gen_device_info *devinfo = p->devinfo;
3199
3200 brw_set_desc(p, insn, brw_message_desc(
3201 devinfo, 1, (commit_enable ? 1 : 0), true));
3202
3203 brw_inst_set_sfid(devinfo, insn, sfid);
3204
3205 switch (sfid) {
3206 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3207 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3208 break;
3209 case GEN7_SFID_DATAPORT_DATA_CACHE:
3210 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3211 break;
3212 default:
3213 unreachable("Not reached");
3214 }
3215
3216 if (commit_enable)
3217 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3218 }
3219
3220 void
3221 brw_memory_fence(struct brw_codegen *p,
3222 struct brw_reg dst,
3223 enum opcode send_op)
3224 {
3225 const struct gen_device_info *devinfo = p->devinfo;
3226 const bool commit_enable =
3227 devinfo->gen >= 10 || /* HSD ES # 1404612949 */
3228 (devinfo->gen == 7 && !devinfo->is_haswell);
3229 struct brw_inst *insn;
3230
3231 brw_push_insn_state(p);
3232 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3233 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3234 dst = vec1(dst);
3235
3236 /* Set dst as the destination for dependency tracking; the MEMORY_FENCE
3237 * message doesn't write anything back.
3238 */
3239 insn = next_insn(p, send_op);
3240 dst = retype(dst, BRW_REGISTER_TYPE_UW);
3241 brw_set_dest(p, insn, dst);
3242 brw_set_src0(p, insn, dst);
3243 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3244 commit_enable);
3245
3246 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3247 /* IVB does typed surface access through the render cache, so we need to
3248 * flush it too. Use a different register so both flushes can be
3249 * pipelined by the hardware.
3250 */
3251 insn = next_insn(p, send_op);
3252 brw_set_dest(p, insn, offset(dst, 1));
3253 brw_set_src0(p, insn, offset(dst, 1));
3254 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3255 commit_enable);
3256
3257 /* Now write the response of the second message into the response of the
3258 * first to trigger a pipeline stall -- This way future render and data
3259 * cache messages will be properly ordered with respect to past data and
3260 * render cache messages.
3261 */
3262 brw_MOV(p, dst, offset(dst, 1));
3263 }
3264
3265 brw_pop_insn_state(p);
3266 }
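/* A minimal usage sketch (the register number is hypothetical): a
 * back-end lowering a fence would reserve a scratch GRF for the commit
 * response and emit
 *
 *    brw_memory_fence(p, brw_vec8_grf(tmp_nr, 0), BRW_OPCODE_SEND);
 *
 * Note that on IVB the register at tmp_nr + 1 is written as well, by
 * the render-cache flush emitted above.
 */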
3267
3268 void
3269 brw_pixel_interpolator_query(struct brw_codegen *p,
3270 struct brw_reg dest,
3271 struct brw_reg mrf,
3272 bool noperspective,
3273 unsigned mode,
3274 struct brw_reg data,
3275 unsigned msg_length,
3276 unsigned response_length)
3277 {
3278 const struct gen_device_info *devinfo = p->devinfo;
3279 const uint16_t exec_size = brw_get_default_exec_size(p);
3280 const unsigned slot_group = brw_get_default_group(p) / 16;
3281 const unsigned simd_mode = (exec_size == BRW_EXECUTE_16);
3282 const unsigned desc =
3283 brw_message_desc(devinfo, msg_length, response_length, false) |
3284 brw_pixel_interp_desc(devinfo, mode, noperspective, simd_mode,
3285 slot_group);
3286
3287 /* brw_send_indirect_message will automatically use a direct send message
3288     * if data is actually an immediate.
3289 */
3290 brw_send_indirect_message(p,
3291 GEN7_SFID_PIXEL_INTERPOLATOR,
3292 dest,
3293 mrf,
3294 vec1(data),
3295 desc);
3296 }
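/* A minimal usage sketch (mode enum and message lengths are assumed
 * values, not from a real caller): a SIMD16 interpolate-at-centroid
 * query with no extra message data could be emitted as
 *
 *    brw_pixel_interpolator_query(p, dest, mrf, false,
 *                                 GEN7_PIXEL_INTERPOLATOR_LOC_CENTROID,
 *                                 brw_imm_ud(0), msg_length,
 *                                 response_length);
 *
 * Since data is an immediate here, an ordinary direct SEND is used as
 * noted above.
 */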
3297
3298 void
3299 brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
3300 struct brw_reg mask)
3301 {
3302 const struct gen_device_info *devinfo = p->devinfo;
3303 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
3304 const unsigned qtr_control = brw_get_default_group(p) / 8;
3305 brw_inst *inst;
3306
3307 assert(devinfo->gen >= 7);
3308 assert(mask.type == BRW_REGISTER_TYPE_UD);
3309
3310 brw_push_insn_state(p);
3311
3312 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3313 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3314
3315 if (devinfo->gen >= 8) {
3316 /* Getting the first active channel index is easy on Gen8: Just find
3317 * the first bit set in the execution mask. The register exists on
3318 * HSW already but it reads back as all ones when the current
3319 * instruction has execution masking disabled, so it's kind of
3320 * useless.
3321 */
3322 struct brw_reg exec_mask =
3323 retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);
3324
3325 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3326 if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
3327 /* Unfortunately, ce0 does not take into account the thread
3328 * dispatch mask, which may be a problem in cases where it's not
3329 * tightly packed (i.e. it doesn't have the form '2^n - 1' for
3330 * some n). Combine ce0 with the given dispatch (or vector) mask
3331 * to mask off those channels which were never dispatched by the
3332 * hardware.
3333 */
3334 brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
3335 brw_AND(p, vec1(dst), exec_mask, vec1(dst));
3336 exec_mask = vec1(dst);
3337 }
3338
3339 /* Quarter control has the effect of magically shifting the value of
3340 * ce0 so you'll get the first active channel relative to the
3341          * specified quarter control as the result.
3342 */
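/* Worked example (illustrative numbers): with a SIMD16 dispatch mask of
 * 0xffff passed in and the second quarter selected (group 8, so
 * qtr_control == 1), the SHR above leaves 0xff, the AND keeps only the
 * channels ce0 reports live for slots 8..15, and the FBL below returns
 * the index of the first live channel relative to the group.
 */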
3343 inst = brw_FBL(p, vec1(dst), exec_mask);
3344 } else {
3345 const struct brw_reg flag = brw_flag_reg(p->current->flag_subreg / 2,
3346 p->current->flag_subreg % 2);
3347
3348 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3349 brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
3350
3351 /* Run enough instructions returning zero with execution masking and
3352 * a conditional modifier enabled in order to get the full execution
3353 * mask in f1.0. We could use a single 32-wide move here if it
3354          * weren't for the hardware bug that causes channel enables to
3355 * be applied incorrectly to the second half of 32-wide instructions
3356 * on Gen7.
3357 */
3358 const unsigned lower_size = MIN2(16, exec_size);
3359 for (unsigned i = 0; i < exec_size / lower_size; i++) {
3360 inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
3361 brw_imm_uw(0));
3362 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
3363 brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
3364 brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
3365 brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
3366 }
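/* Worked example (illustrative): in SIMD16 with only channels 0..9
 * alive, each MOV above writes nothing visible, but its Z conditional
 * modifier evaluates 0 == 0 in every enabled channel and sets the
 * matching flag bits, so f1.0 ends up holding 0x03ff and the FBL below
 * returns 0.
 */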
3367
3368 /* Find the first bit set in the exec_size-wide portion of the flag
3369 * register that was updated by the last sequence of MOV
3370 * instructions.
3371 */
3372 const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
3373 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3374 brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
3375 }
3376 } else {
3377 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3378
3379 if (devinfo->gen >= 8 &&
3380 mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
3381 /* In SIMD4x2 mode the first active channel index is just the
3382 * negation of the first bit of the mask register. Note that ce0
3383 * doesn't take into account the dispatch mask, so the Gen7 path
3384 * should be used instead unless you have the guarantee that the
3385 * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
3386 * for some n).
3387 */
3388 inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
3389 negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
3390 brw_imm_ud(1));
3391
3392 } else {
3393          /* Overwrite the destination first without and then with execution
3394           * masking to find out which of the channels is active.
3395 */
3396 brw_push_insn_state(p);
3397 brw_set_default_exec_size(p, BRW_EXECUTE_4);
3398 brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
3399 brw_imm_ud(1));
3400
3401 inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
3402 brw_imm_ud(0));
3403 brw_pop_insn_state(p);
3404 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
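/* Worked example (illustrative): if the first SIMD4x2 slot is alive,
 * the execution-masked MOV just above overwrites the 1 with 0, so
 * dst.x reads 0, the index of the first live channel; if that slot is
 * dead, dst.x keeps the 1 from the unmasked MOV, pointing at the
 * second slot.
 */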
3405 }
3406 }
3407
3408 brw_pop_insn_state(p);
3409 }
3410
3411 void
3412 brw_broadcast(struct brw_codegen *p,
3413 struct brw_reg dst,
3414 struct brw_reg src,
3415 struct brw_reg idx)
3416 {
3417 const struct gen_device_info *devinfo = p->devinfo;
3418 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3419 brw_inst *inst;
3420
3421 brw_push_insn_state(p);
3422 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3423 brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);
3424
3425 assert(src.file == BRW_GENERAL_REGISTER_FILE &&
3426 src.address_mode == BRW_ADDRESS_DIRECT);
3427 assert(!src.abs && !src.negate);
3428 assert(src.type == dst.type);
3429
3430 if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
3431 idx.file == BRW_IMMEDIATE_VALUE) {
3432 /* Trivial, the source is already uniform or the index is a constant.
3433 * We will typically not get here if the optimizer is doing its job, but
3434 * asserting would be mean.
3435 */
3436 const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
3437 brw_MOV(p, dst,
3438 (align1 ? stride(suboffset(src, i), 0, 1, 0) :
3439 stride(suboffset(src, 4 * i), 0, 4, 1)));
3440 } else {
3441 /* From the Haswell PRM section "Register Region Restrictions":
3442 *
3443 * "The lower bits of the AddressImmediate must not overflow to
3444 * change the register address. The lower 5 bits of Address
3445 * Immediate when added to lower 5 bits of address register gives
3446 * the sub-register offset. The upper bits of Address Immediate
3447 * when added to upper bits of address register gives the register
3448 * address. Any overflow from sub-register offset is dropped."
3449 *
3450 * Fortunately, for broadcast, we never have a sub-register offset so
3451 * this isn't an issue.
3452 */
3453 assert(src.subnr == 0);
3454
3455 if (align1) {
3456 const struct brw_reg addr =
3457 retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
3458 unsigned offset = src.nr * REG_SIZE + src.subnr;
3459 /* Limit in bytes of the signed indirect addressing immediate. */
3460 const unsigned limit = 512;
3461
3462 brw_push_insn_state(p);
3463 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3464 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
3465
3466 /* Take into account the component size and horizontal stride. */
3467 assert(src.vstride == src.hstride + src.width);
3468 brw_SHL(p, addr, vec1(idx),
3469 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
3470 src.hstride - 1));
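/* Worked example (illustrative): for a D-type source with a horizontal
 * stride of one element, type_sz() is 4 and hstride holds the encoded
 * value 1, so the shift amount is log2(4) + 1 - 1 == 2 and the address
 * register ends up holding idx * 4 bytes.
 */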
3471
3472 /* We can only address up to limit bytes using the indirect
3473          * addressing immediate; account for the difference if the source
3474 * register is above this limit.
3475 */
3476 if (offset >= limit) {
3477 brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
3478 offset = offset % limit;
3479 }
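/* Worked example (illustrative): for src.nr == 20 the byte offset is
 * 20 * REG_SIZE == 640, which is beyond the 512-byte immediate range,
 * so the ADD above moves 512 bytes into the address register and the
 * remaining 128 bytes stay in the AddressImmediate.
 */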
3480
3481 brw_pop_insn_state(p);
3482
3483 /* Use indirect addressing to fetch the specified component. */
3484 if (type_sz(src.type) > 4 &&
3485 (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
3486 /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
3487 *
3488 * "When source or destination datatype is 64b or operation is
3489 * integer DWord multiply, indirect addressing must not be
3490 * used."
3491 *
3492             * To work around this issue, we do two integer MOVs instead of
3493             * one 64-bit MOV. Because no double value should ever
3494 * cross a register boundary, it's safe to use the immediate
3495 * offset in the indirect here to handle adding 4 bytes to the
3496 * offset and avoid the extra ADD to the register file.
3497 */
3498 brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
3499 retype(brw_vec1_indirect(addr.subnr, offset),
3500 BRW_REGISTER_TYPE_D));
3501 brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
3502 retype(brw_vec1_indirect(addr.subnr, offset + 4),
3503 BRW_REGISTER_TYPE_D));
3504 } else {
3505 brw_MOV(p, dst,
3506 retype(brw_vec1_indirect(addr.subnr, offset), src.type));
3507 }
3508 } else {
3509          /* In SIMD4x2 mode the index can be either zero or one; replicate it
3510 * to all bits of a flag register,
3511 */
3512 inst = brw_MOV(p,
3513 brw_null_reg(),
3514 stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
3515 brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
3516 brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
3517 brw_inst_set_flag_reg_nr(devinfo, inst, 1);
3518
3519 /* and use predicated SEL to pick the right channel. */
3520 inst = brw_SEL(p, dst,
3521 stride(suboffset(src, 4), 4, 4, 1),
3522 stride(src, 4, 4, 1));
3523 brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
3524 brw_inst_set_flag_reg_nr(devinfo, inst, 1);
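/* Worked example (illustrative): idx.x == 1 makes the NZ modifier set
 * every bit of f1.0, so the predicated SEL picks the second SIMD4x2
 * half at suboffset 4; idx.x == 0 leaves f1.0 clear and the SEL falls
 * through to the first half.
 */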
3525 }
3526 }
3527
3528 brw_pop_insn_state(p);
3529 }
3530
3531 /**
3532 * This instruction is generated as a single-channel align1 instruction by
3533 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
3534 *
3535 * We can't use the typed atomic op in the FS because that has the execution
3536 * mask ANDed with the pixel mask, but we just want to write the one dword for
3537 * all the pixels.
3538 *
3539  * We don't use the SIMD4x2 atomic ops in the VS because we want to just
3540  * write one u32. So we use the same untyped atomic write message as the
3541  * pixel shader.
3542 *
3543 * The untyped atomic operation requires a BUFFER surface type with RAW
3544 * format, and is only accessible through the legacy DATA_CACHE dataport
3545 * messages.
3546 */
3547 void brw_shader_time_add(struct brw_codegen *p,
3548 struct brw_reg payload,
3549 uint32_t surf_index)
3550 {
3551 const struct gen_device_info *devinfo = p->devinfo;
3552 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3553 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3554 GEN7_SFID_DATAPORT_DATA_CACHE);
3555 assert(devinfo->gen >= 7);
3556
3557 brw_push_insn_state(p);
3558 brw_set_default_access_mode(p, BRW_ALIGN_1);
3559 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3560 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
3561 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
3562
3563 /* We use brw_vec1_reg and unmasked because we want to increment the given
3564 * offset only once.
3565 */
3566 brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
3567 BRW_ARF_NULL, 0));
3568 brw_set_src0(p, send, brw_vec1_reg(payload.file,
3569 payload.nr, 0));
3570 brw_set_desc(p, send, (brw_message_desc(devinfo, 2, 0, false) |
3571 brw_dp_untyped_atomic_desc(p, BRW_AOP_ADD, false)));
3572
3573 brw_inst_set_sfid(devinfo, send, sfid);
3574 brw_inst_set_binding_table_index(devinfo, send, surf_index);
3575
3576 brw_pop_insn_state(p);
3577 }
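/* A minimal usage sketch (the names are hypothetical): after packing
 * the shader-time buffer offset and the accumulated delta into two
 * consecutive GRFs, a generator would emit
 *
 *    brw_shader_time_add(p, payload_reg, prog_data->shader_time_index);
 *
 * matching the mlen == 2, rlen == 0 descriptor built above.
 */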
3578
3579
3580 /**
3581 * Emit the SEND message for a barrier
3582 */
3583 void
3584 brw_barrier(struct brw_codegen *p, struct brw_reg src)
3585 {
3586 const struct gen_device_info *devinfo = p->devinfo;
3587 struct brw_inst *inst;
3588
3589 assert(devinfo->gen >= 7);
3590
3591 brw_push_insn_state(p);
3592 brw_set_default_access_mode(p, BRW_ALIGN_1);
3593 inst = next_insn(p, BRW_OPCODE_SEND);
3594 brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
3595 brw_set_src0(p, inst, src);
3596 brw_set_src1(p, inst, brw_null_reg());
3597 brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));
3598
3599 brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
3600 brw_inst_set_gateway_notify(devinfo, inst, 1);
3601 brw_inst_set_gateway_subfuncid(devinfo, inst,
3602 BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);
3603
3604 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
3605 brw_pop_insn_state(p);
3606 }
3607
3608
3609 /**
3610 * Emit the wait instruction for a barrier
3611 */
3612 void
3613 brw_WAIT(struct brw_codegen *p)
3614 {
3615 const struct gen_device_info *devinfo = p->devinfo;
3616 struct brw_inst *insn;
3617
3618 struct brw_reg src = brw_notification_reg();
3619
3620 insn = next_insn(p, BRW_OPCODE_WAIT);
3621 brw_set_dest(p, insn, src);
3622 brw_set_src0(p, insn, src);
3623 brw_set_src1(p, insn, brw_null_reg());
3624
3625 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
3626 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
3627 }
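/* A minimal sketch of how the two helpers pair up (the payload register
 * is hypothetical): a compute back-end implementing barrier() emits
 *
 *    brw_barrier(p, brw_vec8_grf(payload_nr, 0));
 *    brw_WAIT(p);
 *
 * so the thread signals the message gateway and then sleeps until its
 * notification register is written.
 */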
3628
3629 /**
3630  * Changes the floating-point rounding mode by updating the control register
3631  * field defined at bits cr0.0[5:6]. This function supports switching to
3632  * RTNE (00), RU (01), RD (10) and RTZ (11) rounding using bitwise operations.
3633  * Only RTNE and RTZ rounding are enabled in NIR.
3634 */
3635 void
3636 brw_rounding_mode(struct brw_codegen *p,
3637 enum brw_rnd_mode mode)
3638 {
3639 const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;
3640
3641 if (bits != BRW_CR0_RND_MODE_MASK) {
3642 brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
3643 brw_imm_ud(~BRW_CR0_RND_MODE_MASK));
3644 brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
3645
3646 /* From the Skylake PRM, Volume 7, page 760:
3647 * "Implementation Restriction on Register Access: When the control
3648 * register is used as an explicit source and/or destination, hardware
3649 * does not ensure execution pipeline coherency. Software must set the
3650 * thread control field to ‘switch’ for an instruction that uses
3651 * control register as an explicit operand."
3652 */
3653 brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
3654 }
3655
3656 if (bits) {
3657 brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
3658 brw_imm_ud(bits));
3659 brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
3660 brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
3661 }
3662 }
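/* Worked example (illustrative): switching to RTZ (bits == 11b) skips
 * the AND above because the OR sets both field bits regardless of their
 * previous value, while switching to RTNE (bits == 00b) emits only the
 * AND, which clears the field.
 */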