intel/fs: Implement quad swizzles on ICL+.
[mesa.git] / src / intel / compiler / brw_eu_emit.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
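
/* Illustrative sketch (not part of the original file): a generator emitting
 * a SEND on gen6+ would resolve the implied move first, along these lines,
 * where the payload register and message register number are hypothetical:
 *
 *    struct brw_reg payload = brw_vec8_grf(12, 0);
 *    gen6_resolve_implied_move(p, &payload, 2);   // payload now names m2
 *    // ... emit the SEND with payload as src0 ...
 *
 * After the call, `payload` refers to the message register regardless of
 * which file it started in.
 */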

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
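
/* Illustrative example (assuming GEN7_MRF_HACK_START is 112, as the
 * R112-R127 EOT range quoted above suggests): on gen7+, a reference to m4
 * is rewritten to g116, i.e.
 *
 *    struct brw_reg reg = brw_message_reg(4);
 *    gen7_convert_mrf_to_grf(p, &reg);
 *    // reg.file == BRW_GENERAL_REGISTER_FILE, reg.nr == 112 + 4
 */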

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          *    Although Dst.HorzStride is a don't care for Align16, HW needs
          *    this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Even though it is ignored in da16 mode, this still needs to be
          * set to '01'.
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * In platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16.  In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}
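
/* Illustrative example of the automatic exec-size reduction above: on gen6+,
 * emitting an instruction whose destination is a scalar (width 1) register,
 * e.g.
 *
 *    brw_MOV(p, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD)), src);
 *
 * has its exec size narrowed from the default (8 or 16) down to 1, since
 * dest.width < BRW_EXECUTE_4.  A width-4 fp64 operand, by contrast, is
 * deliberately left alone, per the comment above.
 */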

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 &&
       (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact that we're using the same
             * descriptions for registers in align_16 as in align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be an immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact that we're using the same
             * descriptions for registers in align_16 as in align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Specify the descriptor and extended descriptor immediate for a SEND(C)
 * message instruction.
 */
void
brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
                unsigned desc, unsigned ex_desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC);
   brw_inst_set_src1_file_type(devinfo, inst,
                               BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_UD);
   brw_inst_set_send_desc(devinfo, inst, desc);
   if (devinfo->gen >= 9)
      brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
}
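
/* Illustrative sketch: a typical caller builds the descriptor with the
 * brw_message_desc() helper rather than open-coding the bits, e.g.
 *
 *    brw_inst *send = next_insn(p, BRW_OPCODE_SEND);
 *    brw_set_dest(p, send, dst);
 *    brw_set_src0(p, send, payload);
 *    brw_set_desc(p, send, brw_message_desc(devinfo, mlen, rlen, true));
 *
 * where mlen/rlen are the message and response lengths in registers and the
 * final argument requests a header.  (brw_set_desc() is assumed to be the
 * zero-ex_desc wrapper around brw_set_desc_ex().)
 */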

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, mlen, rlen, header_present));

   brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
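
/* Worked example for the block size encoding above: for a 4-register
 * scratch block, gen8+ encodes _mesa_logbase2(4) == 2, while gen7 encodes
 * num_regs - 1 == 3.  An 8-register block is only expressible on gen8+,
 * matching the assert.
 */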

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
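
/* Example of the unit change above: a subnr of 8 bytes refers to the third
 * 32-bit component of the register, so get_3src_subreg_nr() returns
 * 8 / 4 == 2.
 */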

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.nr < 128);
   assert(src1.nr < 128);
   assert(src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD);
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types.  The MAD and LRP emitters ensure
          * that all four types are float.  The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                                \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0)                         \
{                                                               \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);             \
}

#define ALU2(OP)                                                \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1)                         \
{                                                               \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);       \
}

#define ALU3(OP)                                                \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   if (p->current->access_mode == BRW_ALIGN_16) {               \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)                \
         src0.swizzle = BRW_SWIZZLE_XXXX;                       \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)                \
         src1.swizzle = BRW_SWIZZLE_XXXX;                       \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)                \
         src2.swizzle = BRW_SWIZZLE_XXXX;                       \
   }                                                            \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

#define ALU3F(OP)                                               \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                   \
          dest.type == BRW_REGISTER_TYPE_DF);                   \
   if (dest.type == BRW_REGISTER_TYPE_F) {                      \
      assert(src0.type == BRW_REGISTER_TYPE_F);                 \
      assert(src1.type == BRW_REGISTER_TYPE_F);                 \
      assert(src2.type == BRW_REGISTER_TYPE_F);                 \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {              \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                \
   }                                                            \
                                                                \
   if (p->current->access_mode == BRW_ALIGN_16) {               \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)                \
         src0.swizzle = BRW_SWIZZLE_XXXX;                       \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)                \
         src1.swizzle = BRW_SWIZZLE_XXXX;                       \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)                \
         src2.swizzle = BRW_SWIZZLE_XXXX;                       \
   }                                                            \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                     \
void brw_##OP(struct brw_codegen *p,                                  \
              struct brw_reg dest,                                    \
              struct brw_reg src)                                     \
{                                                                     \
   const struct gen_device_info *devinfo = p->devinfo;                \
   brw_inst *rnd, *add;                                               \
   rnd = next_insn(p, BRW_OPCODE_##OP);                               \
   brw_set_dest(p, rnd, dest);                                        \
   brw_set_src0(p, rnd, src);                                         \
                                                                      \
   if (devinfo->gen < 6) {                                            \
      /* turn on round-increments */                                  \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R);    \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));                  \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL);  \
   }                                                                  \
}
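
/* Illustrative pre-gen6 expansion of ROUND(RNDZ): brw_RNDZ(p, dst, src)
 * emits roughly
 *
 *    rndz.r (8)  dst  src          // sets the per-channel increment flag
 *    (+f0) add (8)  dst  dst  1.0F // predicated fixup
 *
 * (mnemonics are informal).  On gen6+ only the first instruction is needed.
 */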


ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
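
/* Worked example of the IVB/BYT workaround above: a float source region of
 * <4,4,1>, i.e. channels f0 f1 f2 f3, is converted to <1,2,0>, which reads
 * f0 f0 f1 f1 f2 f2 f3 f3, so each element also occupies the odd channel
 * the hardware would otherwise ignore during F->DF conversion.
 */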

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}
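
/* Illustrative summary of the zero-fill path above, for a UD destination in
 * Align1 mode: the HF result is written into the even words of dst (W type,
 * stride 2), and a second MOV writes 0 to the odd words at suboffset 1; the
 * NoDDClr/NoDDChk pair marks the two writes as covering disjoint halves of
 * the same destination register.
 */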

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   "Because this instruction does not have a 16-bit floating-point
       *    type, the source data type must be Word (W). The destination type
       *    must be F (Float)."
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}





/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, eg. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}
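
/* Illustrative usage sketch (a minimal if/else, assuming the condition flag
 * has already been computed into f0.0):
 *
 *    brw_IF(p, BRW_EXECUTE_8);
 *    // ... "then" instructions ...
 *    brw_ELSE(p);
 *    // ... "else" instructions ...
 *    brw_ENDIF(p);
 *
 * brw_ENDIF() pops the stack and patches the IF/ELSE jump targets via
 * patch_IF_ELSE() below.
 */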

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}
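
/* Worked example of the offsets above: with an IF at instruction i, an ELSE
 * at i+3 and the would-be ENDIF at i+6, the IF becomes an ADD of
 * (3 + 1) * 16 = 64 bytes (each native instruction is 16 bytes), skipping
 * to just past the ELSE, and the ELSE becomes an ADD of 3 * 16 = 48 bytes.
 */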
1342
1343 /**
1344 * Patch IF and ELSE instructions with appropriate jump targets.
1345 */
1346 static void
1347 patch_IF_ELSE(struct brw_codegen *p,
1348 brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
1349 {
1350 const struct gen_device_info *devinfo = p->devinfo;
1351
1352 /* We shouldn't be patching IF and ELSE instructions in single program flow
1353 * mode when gen < 6, because in single program flow mode on those
1354 * platforms, we convert flow control instructions to conditional ADDs that
1355 * operate on IP (see brw_ENDIF).
1356 *
1357 * However, on Gen6, writing to IP doesn't work in single program flow mode
1358 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1359 * not be updated by non-flow control instructions."). And on later
1360 * platforms, there is no significant benefit to converting control flow
1361 * instructions to conditional ADDs. So we do patch IF and ELSE
1362 * instructions in single program flow mode on those platforms.
1363 */
1364 if (devinfo->gen < 6)
1365 assert(!p->single_program_flow);
1366
1367 assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
1368 assert(endif_inst != NULL);
1369 assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
1370
1371 unsigned br = brw_jump_scale(devinfo);
1372
1373 assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
1374 brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));
1375
1376 if (else_inst == NULL) {
1377 /* Patch IF -> ENDIF */
1378 if (devinfo->gen < 6) {
1379 /* Turn it into an IFF, which means no mask stack operations for
1380 * all-false and jumping past the ENDIF.
1381 */
1382 brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
1383 brw_inst_set_gen4_jump_count(devinfo, if_inst,
1384 br * (endif_inst - if_inst + 1));
1385 brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
1386 } else if (devinfo->gen == 6) {
1387 /* As of gen6, there is no IFF and IF must point to the ENDIF. */
1388 brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
1389 } else {
1390 brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
1391 brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
1392 }
1393 } else {
1394 brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));
1395
1396 /* Patch IF -> ELSE */
1397 if (devinfo->gen < 6) {
1398 brw_inst_set_gen4_jump_count(devinfo, if_inst,
1399 br * (else_inst - if_inst));
1400 brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
1401 } else if (devinfo->gen == 6) {
1402 brw_inst_set_gen6_jump_count(devinfo, if_inst,
1403 br * (else_inst - if_inst + 1));
1404 }
1405
1406 /* Patch ELSE -> ENDIF */
1407 if (devinfo->gen < 6) {
1408 /* BRW_OPCODE_ELSE pre-gen6 should point just past the
1409 * matching ENDIF.
1410 */
1411 brw_inst_set_gen4_jump_count(devinfo, else_inst,
1412 br * (endif_inst - else_inst + 1));
1413 brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
1414 } else if (devinfo->gen == 6) {
1415 /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
1416 brw_inst_set_gen6_jump_count(devinfo, else_inst,
1417 br * (endif_inst - else_inst));
1418 } else {
1419 /* The IF instruction's JIP should point just past the ELSE */
1420 brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
1421 /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
1422 brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
1423 brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
1424 if (devinfo->gen >= 8) {
1425 /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
1426 * should point to ENDIF.
1427 */
1428 brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
1429 }
1430 }
1431 }
1432 }
1433
1434 void
1435 brw_ELSE(struct brw_codegen *p)
1436 {
1437 const struct gen_device_info *devinfo = p->devinfo;
1438 brw_inst *insn;
1439
1440 insn = next_insn(p, BRW_OPCODE_ELSE);
1441
1442 if (devinfo->gen < 6) {
1443 brw_set_dest(p, insn, brw_ip_reg());
1444 brw_set_src0(p, insn, brw_ip_reg());
1445 brw_set_src1(p, insn, brw_imm_d(0x0));
1446 } else if (devinfo->gen == 6) {
1447 brw_set_dest(p, insn, brw_imm_w(0));
1448 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1449 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1450 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1451 } else if (devinfo->gen == 7) {
1452 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1453 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1454 brw_set_src1(p, insn, brw_imm_w(0));
1455 brw_inst_set_jip(devinfo, insn, 0);
1456 brw_inst_set_uip(devinfo, insn, 0);
1457 } else {
1458 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1459 brw_set_src0(p, insn, brw_imm_d(0));
1460 brw_inst_set_jip(devinfo, insn, 0);
1461 brw_inst_set_uip(devinfo, insn, 0);
1462 }
1463
1464 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1465 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1466 if (!p->single_program_flow && devinfo->gen < 6)
1467 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1468
1469 push_if_stack(p, insn);
1470 }
1471
1472 void
1473 brw_ENDIF(struct brw_codegen *p)
1474 {
1475 const struct gen_device_info *devinfo = p->devinfo;
1476 brw_inst *insn = NULL;
1477 brw_inst *else_inst = NULL;
1478 brw_inst *if_inst = NULL;
1479 brw_inst *tmp;
1480 bool emit_endif = true;
1481
1482 /* In single program flow mode, we can express IF and ELSE instructions
1483 * equivalently as ADD instructions that operate on IP. On platforms prior
1484 * to Gen6, flow control instructions cause an implied thread switch, so
1485 * this is a significant savings.
1486 *
1487 * However, on Gen6, writing to IP doesn't work in single program flow mode
1488 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1489 * not be updated by non-flow control instructions."). And on later
1490 * platforms, there is no significant benefit to converting control flow
1491 * instructions to conditional ADDs. So we only do this trick on Gen4 and
1492 * Gen5.
1493 */
1494 if (devinfo->gen < 6 && p->single_program_flow)
1495 emit_endif = false;
1496
1497 /*
1498 * A single next_insn() may change the base address of instruction store
1499 * memory(p->store), so call it first before referencing the instruction
1500 * store pointer from an index
1501 */
1502 if (emit_endif)
1503 insn = next_insn(p, BRW_OPCODE_ENDIF);
1504
1505 /* Pop the IF and (optional) ELSE instructions from the stack */
1506 p->if_depth_in_loop[p->loop_stack_depth]--;
1507 tmp = pop_if_stack(p);
1508 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1509 else_inst = tmp;
1510 tmp = pop_if_stack(p);
1511 }
1512 if_inst = tmp;
1513
1514 if (!emit_endif) {
1515 /* ENDIF is useless; don't bother emitting it. */
1516 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1517 return;
1518 }
1519
1520 if (devinfo->gen < 6) {
1521 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1522 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1523 brw_set_src1(p, insn, brw_imm_d(0x0));
1524 } else if (devinfo->gen == 6) {
1525 brw_set_dest(p, insn, brw_imm_w(0));
1526 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1527 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1528 } else if (devinfo->gen == 7) {
1529 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1530 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1531 brw_set_src1(p, insn, brw_imm_w(0));
1532 } else {
1533 brw_set_src0(p, insn, brw_imm_d(0));
1534 }
1535
1536 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1537 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1538 if (devinfo->gen < 6)
1539 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1540
1541 /* Also pop item off the stack in the endif instruction: */
1542 if (devinfo->gen < 6) {
1543 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1544 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1545 } else if (devinfo->gen == 6) {
1546 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1547 } else {
1548 brw_inst_set_jip(devinfo, insn, 2);
1549 }
1550 patch_IF_ELSE(p, if_inst, else_inst, insn);
1551 }
1552
1553 brw_inst *
1554 brw_BREAK(struct brw_codegen *p)
1555 {
1556 const struct gen_device_info *devinfo = p->devinfo;
1557 brw_inst *insn;
1558
1559 insn = next_insn(p, BRW_OPCODE_BREAK);
1560 if (devinfo->gen >= 8) {
1561 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1562 brw_set_src0(p, insn, brw_imm_d(0x0));
1563 } else if (devinfo->gen >= 6) {
1564 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1565 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1566 brw_set_src1(p, insn, brw_imm_d(0x0));
1567 } else {
1568 brw_set_dest(p, insn, brw_ip_reg());
1569 brw_set_src0(p, insn, brw_ip_reg());
1570 brw_set_src1(p, insn, brw_imm_d(0x0));
1571 brw_inst_set_gen4_pop_count(devinfo, insn,
1572 p->if_depth_in_loop[p->loop_stack_depth]);
1573 }
1574 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1575 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1576
1577 return insn;
1578 }
1579
1580 brw_inst *
1581 brw_CONT(struct brw_codegen *p)
1582 {
1583 const struct gen_device_info *devinfo = p->devinfo;
1584 brw_inst *insn;
1585
1586 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1587 brw_set_dest(p, insn, brw_ip_reg());
1588 if (devinfo->gen >= 8) {
1589 brw_set_src0(p, insn, brw_imm_d(0x0));
1590 } else {
1591 brw_set_src0(p, insn, brw_ip_reg());
1592 brw_set_src1(p, insn, brw_imm_d(0x0));
1593 }
1594
1595 if (devinfo->gen < 6) {
1596 brw_inst_set_gen4_pop_count(devinfo, insn,
1597 p->if_depth_in_loop[p->loop_stack_depth]);
1598 }
1599 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1600 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1601 return insn;
1602 }
1603
1604 brw_inst *
1605 gen6_HALT(struct brw_codegen *p)
1606 {
1607 const struct gen_device_info *devinfo = p->devinfo;
1608 brw_inst *insn;
1609
1610 insn = next_insn(p, BRW_OPCODE_HALT);
1611 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1612 if (devinfo->gen >= 8) {
1613 brw_set_src0(p, insn, brw_imm_d(0x0));
1614 } else {
1615 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1616 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1617 }
1618
1619 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1620 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1621 return insn;
1622 }
1623
1624 /* DO/WHILE loop:
1625 *
1626 * The DO/WHILE is just an unterminated loop -- break or continue are
1627  * used for control within the loop.  There are a few ways the loop
1628  * can be implemented.
1629 *
1630 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1631 * jip and no DO instruction.
1632 *
1633 * For non-uniform control flow pre-gen6, there's a DO instruction to
1634 * push the mask, and a WHILE to jump back, and BREAK to get out and
1635 * pop the mask.
1636 *
1637 * For gen6, there's no more mask stack, so no need for DO. WHILE
1638 * just points back to the first instruction of the loop.
1639 */
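
/* Usage sketch (hedged; the predication that makes the BREAK conditional
 * is omitted):
 *
 *    brw_DO(p, BRW_EXECUTE_8);   // on Gen6+ this just records the loop start
 *    ...emit the loop body...
 *    brw_BREAK(p);               // predicated in real code
 *    brw_WHILE(p);               // jumps back; BREAK/CONT are patched later
 */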
1640 brw_inst *
1641 brw_DO(struct brw_codegen *p, unsigned execute_size)
1642 {
1643 const struct gen_device_info *devinfo = p->devinfo;
1644
1645 if (devinfo->gen >= 6 || p->single_program_flow) {
1646 push_loop_stack(p, &p->store[p->nr_insn]);
1647 return &p->store[p->nr_insn];
1648 } else {
1649 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1650
1651 push_loop_stack(p, insn);
1652
1653 /* Override the defaults for this instruction:
1654 */
1655 brw_set_dest(p, insn, brw_null_reg());
1656 brw_set_src0(p, insn, brw_null_reg());
1657 brw_set_src1(p, insn, brw_null_reg());
1658
1659 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1660 brw_inst_set_exec_size(devinfo, insn, execute_size);
1661 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1662
1663 return insn;
1664 }
1665 }
1666
1667 /**
1668 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1669 * instruction here.
1670 *
1671 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1672 * nesting, since it can always just point to the end of the block/current loop.
1673 */
1674 static void
1675 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1676 {
1677 const struct gen_device_info *devinfo = p->devinfo;
1678 brw_inst *do_inst = get_inner_do_insn(p);
1679 brw_inst *inst;
1680 unsigned br = brw_jump_scale(devinfo);
1681
1682 assert(devinfo->gen < 6);
1683
1684 for (inst = while_inst - 1; inst != do_inst; inst--) {
1685    /* If the jump count is != 0, this instruction has already been
1686     * patched because it's part of a loop nested inside the one we're
1687     * patching.
1688     */
1689 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1690 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1691 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1692 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1693 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1694 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1695 }
1696 }
1697 }
1698
1699 brw_inst *
1700 brw_WHILE(struct brw_codegen *p)
1701 {
1702 const struct gen_device_info *devinfo = p->devinfo;
1703 brw_inst *insn, *do_insn;
1704 unsigned br = brw_jump_scale(devinfo);
1705
1706 if (devinfo->gen >= 6) {
1707 insn = next_insn(p, BRW_OPCODE_WHILE);
1708 do_insn = get_inner_do_insn(p);
1709
1710 if (devinfo->gen >= 8) {
1711 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1712 brw_set_src0(p, insn, brw_imm_d(0));
1713 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1714 } else if (devinfo->gen == 7) {
1715 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1716 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1717 brw_set_src1(p, insn, brw_imm_w(0));
1718 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1719 } else {
1720 brw_set_dest(p, insn, brw_imm_w(0));
1721 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1722 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1723 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1724 }
1725
1726 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1727
1728 } else {
1729 if (p->single_program_flow) {
1730 insn = next_insn(p, BRW_OPCODE_ADD);
1731 do_insn = get_inner_do_insn(p);
1732
1733 brw_set_dest(p, insn, brw_ip_reg());
1734 brw_set_src0(p, insn, brw_ip_reg());
1735 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1736 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1737 } else {
1738 insn = next_insn(p, BRW_OPCODE_WHILE);
1739 do_insn = get_inner_do_insn(p);
1740
1741 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1742
1743 brw_set_dest(p, insn, brw_ip_reg());
1744 brw_set_src0(p, insn, brw_ip_reg());
1745 brw_set_src1(p, insn, brw_imm_d(0));
1746
1747 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1748 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1749 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1750
1751 brw_patch_break_cont(p, insn);
1752 }
1753 }
1754 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1755
1756 p->loop_stack_depth--;
1757
1758 return insn;
1759 }
1760
1761 /* FORWARD JUMPS:
1762 */
1763 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1764 {
1765 const struct gen_device_info *devinfo = p->devinfo;
1766 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1767 unsigned jmpi = 1;
1768
1769 if (devinfo->gen >= 5)
1770 jmpi = 2;
1771
1772 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1773 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1774
1775 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1776 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1777 }
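
/* Worked example with hypothetical numbers: for a JMPI emitted at index 10
 * with p->nr_insn == 14 on Gen5 (jmpi == 2), the patched jump count is
 * 2 * (14 - 10 - 1) == 6, skipping the three instructions in between so the
 * jump lands at the current end of the program.
 */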
1778
1779 /* To integrate with the above, it makes sense that the comparison
1780 * instruction should populate the flag register. It might be simpler
1781 * just to use the flag reg for most WM tasks?
1782 */
1783 void brw_CMP(struct brw_codegen *p,
1784 struct brw_reg dest,
1785 unsigned conditional,
1786 struct brw_reg src0,
1787 struct brw_reg src1)
1788 {
1789 const struct gen_device_info *devinfo = p->devinfo;
1790 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1791
1792 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1793 brw_set_dest(p, insn, dest);
1794 brw_set_src0(p, insn, src0);
1795 brw_set_src1(p, insn, src1);
1796
1797 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1798 * page says:
1799 * "Any CMP instruction with a null destination must use a {switch}."
1800 *
1801 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1802 * mentioned on their work-arounds pages.
1803 */
1804 if (devinfo->gen == 7) {
1805 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1806 dest.nr == BRW_ARF_NULL) {
1807 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1808 }
1809 }
1810 }
1811
1812 /***********************************************************************
1813 * Helpers for the various SEND message types:
1814 */
1815
1816 /** Extended math function, float[8].
1817 */
1818 void gen4_math(struct brw_codegen *p,
1819 struct brw_reg dest,
1820 unsigned function,
1821 unsigned msg_reg_nr,
1822 struct brw_reg src,
1823 unsigned precision )
1824 {
1825 const struct gen_device_info *devinfo = p->devinfo;
1826 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1827 unsigned data_type;
1828 if (has_scalar_region(src)) {
1829 data_type = BRW_MATH_DATA_SCALAR;
1830 } else {
1831 data_type = BRW_MATH_DATA_VECTOR;
1832 }
1833
1834 assert(devinfo->gen < 6);
1835
1836 /* Example code doesn't set predicate_control for send
1837 * instructions.
1838 */
1839 brw_inst_set_pred_control(devinfo, insn, 0);
1840 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1841
1842 brw_set_dest(p, insn, dest);
1843 brw_set_src0(p, insn, src);
1844 brw_set_math_message(p,
1845 insn,
1846 function,
1847 src.type == BRW_REGISTER_TYPE_D,
1848 precision,
1849 data_type);
1850 }
1851
1852 void gen6_math(struct brw_codegen *p,
1853 struct brw_reg dest,
1854 unsigned function,
1855 struct brw_reg src0,
1856 struct brw_reg src1)
1857 {
1858 const struct gen_device_info *devinfo = p->devinfo;
1859 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1860
1861 assert(devinfo->gen >= 6);
1862
1863 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1864 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1865
1866 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1867 if (devinfo->gen == 6) {
1868 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1869 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1870 }
1871
1872 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1873 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1874 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1875 assert(src0.type != BRW_REGISTER_TYPE_F);
1876 assert(src1.type != BRW_REGISTER_TYPE_F);
1877 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1878 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1879 } else {
1880 assert(src0.type == BRW_REGISTER_TYPE_F);
1881 assert(src1.type == BRW_REGISTER_TYPE_F);
1882 }
1883
1884 /* Source modifiers are ignored for extended math instructions on Gen6. */
1885 if (devinfo->gen == 6) {
1886 assert(!src0.negate);
1887 assert(!src0.abs);
1888 assert(!src1.negate);
1889 assert(!src1.abs);
1890 }
1891
1892 brw_inst_set_math_function(devinfo, insn, function);
1893
1894 brw_set_dest(p, insn, dest);
1895 brw_set_src0(p, insn, src0);
1896 brw_set_src1(p, insn, src1);
1897 }
1898
1899 /**
1900 * Return the right surface index to access the thread scratch space using
1901 * stateless dataport messages.
1902 */
1903 unsigned
1904 brw_scratch_surface_idx(const struct brw_codegen *p)
1905 {
1906 /* The scratch space is thread-local so IA coherency is unnecessary. */
1907 if (p->devinfo->gen >= 8)
1908 return GEN8_BTI_STATELESS_NON_COHERENT;
1909 else
1910 return BRW_BTI_STATELESS;
1911 }
1912
1913 /**
1914  * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1915 * using a constant offset per channel.
1916 *
1917 * The offset must be aligned to oword size (16 bytes). Used for
1918 * register spilling.
1919 */
1920 void brw_oword_block_write_scratch(struct brw_codegen *p,
1921 struct brw_reg mrf,
1922 int num_regs,
1923 unsigned offset)
1924 {
1925 const struct gen_device_info *devinfo = p->devinfo;
1926 const unsigned target_cache =
1927 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1928 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1929 BRW_SFID_DATAPORT_WRITE);
1930 uint32_t msg_type;
1931
1932 if (devinfo->gen >= 6)
1933 offset /= 16;
1934
1935 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1936
1937 const unsigned mlen = 1 + num_regs;
1938
1939 /* Set up the message header. This is g0, with g0.2 filled with
1940 * the offset. We don't want to leave our offset around in g0 or
1941 * it'll screw up texture samples, so set it up inside the message
1942 * reg.
1943 */
1944 {
1945 brw_push_insn_state(p);
1946 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1947 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1948 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1949
1950 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1951
1952 /* set message header global offset field (reg 0, element 2) */
1953 brw_set_default_exec_size(p, BRW_EXECUTE_1);
1954 brw_MOV(p,
1955 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
1956 mrf.nr,
1957 2), BRW_REGISTER_TYPE_UD),
1958 brw_imm_ud(offset));
1959
1960 brw_pop_insn_state(p);
1961 }
1962
1963 {
1964 struct brw_reg dest;
1965 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1966 int send_commit_msg;
1967 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
1968 BRW_REGISTER_TYPE_UW);
1969
1970 brw_inst_set_sfid(devinfo, insn, target_cache);
1971 brw_inst_set_compression(devinfo, insn, false);
1972
1973       if (brw_inst_exec_size(devinfo, insn) >= BRW_EXECUTE_16)
1974 src_header = vec16(src_header);
1975
1976 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1977 if (devinfo->gen < 6)
1978 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
1979
1980 /* Until gen6, writes followed by reads from the same location
1981 * are not guaranteed to be ordered unless write_commit is set.
1982 * If set, then a no-op write is issued to the destination
1983 * register to set a dependency, and a read from the destination
1984 * can be used to ensure the ordering.
1985 *
1986 * For gen6, only writes between different threads need ordering
1987 * protection. Our use of DP writes is all about register
1988 * spilling within a thread.
1989 */
1990 if (devinfo->gen >= 6) {
1991 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
1992 send_commit_msg = 0;
1993 } else {
1994 dest = src_header;
1995 send_commit_msg = 1;
1996 }
1997
1998 brw_set_dest(p, insn, dest);
1999 if (devinfo->gen >= 6) {
2000 brw_set_src0(p, insn, mrf);
2001 } else {
2002 brw_set_src0(p, insn, brw_null_reg());
2003 }
2004
2005 if (devinfo->gen >= 6)
2006 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2007 else
2008 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2009
2010 brw_set_desc(p, insn,
2011 brw_message_desc(devinfo, mlen, send_commit_msg, true) |
2012 brw_dp_write_desc(devinfo, brw_scratch_surface_idx(p),
2013 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2014 msg_type, 0, /* not a render target */
2015 send_commit_msg));
2016 }
2017 }
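
/* Usage sketch with hypothetical values: spilling two GRFs whose contents
 * have been copied to m2..m3, using m1 for the message header, at scratch
 * byte offset 64 (oword aligned):
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(1), 2, 64);
 */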
2018
2019
2020 /**
2021 * Read a block of owords (half a GRF each) from the scratch buffer
2022 * using a constant index per channel.
2023 *
2024 * Offset must be aligned to oword size (16 bytes). Used for register
2025 * spilling.
2026 */
2027 void
2028 brw_oword_block_read_scratch(struct brw_codegen *p,
2029 struct brw_reg dest,
2030 struct brw_reg mrf,
2031 int num_regs,
2032 unsigned offset)
2033 {
2034 const struct gen_device_info *devinfo = p->devinfo;
2035
2036 if (devinfo->gen >= 6)
2037 offset /= 16;
2038
2039 if (p->devinfo->gen >= 7) {
2040 /* On gen 7 and above, we no longer have message registers and we can
2041 * send from any register we want. By using the destination register
2042 * for the message, we guarantee that the implied message write won't
2043 * accidentally overwrite anything. This has been a problem because
2044 * the MRF registers and source for the final FB write are both fixed
2045 * and may overlap.
2046 */
2047 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2048 } else {
2049 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2050 }
2051 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2052
2053 const unsigned rlen = num_regs;
2054 const unsigned target_cache =
2055 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2056 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2057 BRW_SFID_DATAPORT_READ);
2058
2059 {
2060 brw_push_insn_state(p);
2061 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2062 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2063 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2064
2065 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2066
2067 /* set message header global offset field (reg 0, element 2) */
2068 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2069 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2070
2071 brw_pop_insn_state(p);
2072 }
2073
2074 {
2075 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2076
2077 brw_inst_set_sfid(devinfo, insn, target_cache);
2078 assert(brw_inst_pred_control(devinfo, insn) == 0);
2079 brw_inst_set_compression(devinfo, insn, false);
2080
2081 brw_set_dest(p, insn, dest); /* UW? */
2082 if (devinfo->gen >= 6) {
2083 brw_set_src0(p, insn, mrf);
2084 } else {
2085 brw_set_src0(p, insn, brw_null_reg());
2086 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2087 }
2088
2089 brw_set_desc(p, insn,
2090 brw_message_desc(devinfo, 1, rlen, true) |
2091 brw_dp_read_desc(devinfo, brw_scratch_surface_idx(p),
2092 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2093 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2094 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2095 }
2096 }
2097
2098 void
2099 gen7_block_read_scratch(struct brw_codegen *p,
2100 struct brw_reg dest,
2101 int num_regs,
2102 unsigned offset)
2103 {
2104 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2105 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2106
2107 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2108
2109 /* The HW requires that the header is present; this is to get the g0.5
2110 * scratch offset.
2111 */
2112 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2113
2114 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2115 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2116 * is 32 bytes, which happens to be the size of a register.
2117 */
2118 offset /= REG_SIZE;
2119 assert(offset < (1 << 12));
2120
2121 gen7_set_dp_scratch_message(p, insn,
2122 false, /* scratch read */
2123 false, /* OWords */
2124 false, /* invalidate after read */
2125 num_regs,
2126 offset,
2127 1, /* mlen: just g0 */
2128 num_regs, /* rlen */
2129 true); /* header present */
2130 }
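
/* Worked example with hypothetical values: filling two GRFs spilled at byte
 * offset 256 encodes 256 / REG_SIZE == 8 HWords, comfortably inside the
 * 12-bit (4096 HWord) limit asserted above:
 *
 *    gen7_block_read_scratch(p, dst, 2, 256);
 */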
2131
2132 /**
2133 * Read float[4] vectors from the data port constant cache.
2134 * Location (in buffer) should be a multiple of 16.
2135 * Used for fetching shader constants.
2136 */
2137 void brw_oword_block_read(struct brw_codegen *p,
2138 struct brw_reg dest,
2139 struct brw_reg mrf,
2140 uint32_t offset,
2141 uint32_t bind_table_index)
2142 {
2143 const struct gen_device_info *devinfo = p->devinfo;
2144 const unsigned target_cache =
2145 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2146 BRW_SFID_DATAPORT_READ);
2147 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2148
2149 /* On newer hardware, offset is in units of owords. */
2150 if (devinfo->gen >= 6)
2151 offset /= 16;
2152
2153 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2154
2155 brw_push_insn_state(p);
2156 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2157 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2158 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2159
2160 brw_push_insn_state(p);
2161 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2162 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2163
2164 /* set message header global offset field (reg 0, element 2) */
2165 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2166 brw_MOV(p,
2167 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2168 mrf.nr,
2169 2), BRW_REGISTER_TYPE_UD),
2170 brw_imm_ud(offset));
2171 brw_pop_insn_state(p);
2172
2173 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2174
2175 brw_inst_set_sfid(devinfo, insn, target_cache);
2176
2177 /* cast dest to a uword[8] vector */
2178 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2179
2180 brw_set_dest(p, insn, dest);
2181 if (devinfo->gen >= 6) {
2182 brw_set_src0(p, insn, mrf);
2183 } else {
2184 brw_set_src0(p, insn, brw_null_reg());
2185 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2186 }
2187
2188 brw_set_desc(p, insn,
2189 brw_message_desc(devinfo, 1, DIV_ROUND_UP(exec_size, 8), true) |
2190 brw_dp_read_desc(devinfo, bind_table_index,
2191 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2192 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2193 BRW_DATAPORT_READ_TARGET_DATA_CACHE));
2194
2195 brw_pop_insn_state(p);
2196 }
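
/* Usage sketch with hypothetical values ("dst" is a GRF destination):
 * fetching shader constants from binding table entry 3 at buffer offset 32
 * (a multiple of 16, as required), with m1 holding the message header:
 *
 *    brw_oword_block_read(p, dst, brw_message_reg(1), 32, 3);
 */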
2197
2198 brw_inst *
2199 brw_fb_WRITE(struct brw_codegen *p,
2200 struct brw_reg payload,
2201 struct brw_reg implied_header,
2202 unsigned msg_control,
2203 unsigned binding_table_index,
2204 unsigned msg_length,
2205 unsigned response_length,
2206 bool eot,
2207 bool last_render_target,
2208 bool header_present)
2209 {
2210 const struct gen_device_info *devinfo = p->devinfo;
2211 const unsigned target_cache =
2212 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2213 BRW_SFID_DATAPORT_WRITE);
2214 brw_inst *insn;
2215 unsigned msg_type;
2216 struct brw_reg dest, src0;
2217
2218 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2219 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2220 else
2221 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2222
2223 if (devinfo->gen >= 6) {
2224 insn = next_insn(p, BRW_OPCODE_SENDC);
2225 } else {
2226 insn = next_insn(p, BRW_OPCODE_SEND);
2227 }
2228 brw_inst_set_sfid(devinfo, insn, target_cache);
2229 brw_inst_set_compression(devinfo, insn, false);
2230
2231 if (devinfo->gen >= 6) {
2232 /* headerless version, just submit color payload */
2233 src0 = payload;
2234
2235 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2236 } else {
2237 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2238 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2239 src0 = implied_header;
2240
2241 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2242 }
2243
2244 brw_set_dest(p, insn, dest);
2245 brw_set_src0(p, insn, src0);
2246 brw_set_desc(p, insn,
2247 brw_message_desc(devinfo, msg_length, response_length,
2248 header_present) |
2249 brw_dp_write_desc(devinfo, binding_table_index, msg_control,
2250 msg_type, last_render_target,
2251 0 /* send_commit_msg */));
2252 brw_inst_set_eot(devinfo, insn, eot);
2253
2254 return insn;
2255 }
2256
2257 brw_inst *
2258 gen9_fb_READ(struct brw_codegen *p,
2259 struct brw_reg dst,
2260 struct brw_reg payload,
2261 unsigned binding_table_index,
2262 unsigned msg_length,
2263 unsigned response_length,
2264 bool per_sample)
2265 {
2266 const struct gen_device_info *devinfo = p->devinfo;
2267 assert(devinfo->gen >= 9);
2268 const unsigned msg_subtype =
2269 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2270 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2271
2272 brw_inst_set_sfid(devinfo, insn, GEN6_SFID_DATAPORT_RENDER_CACHE);
2273 brw_set_dest(p, insn, dst);
2274 brw_set_src0(p, insn, payload);
2275 brw_set_desc(
2276 p, insn,
2277 brw_message_desc(devinfo, msg_length, response_length, true) |
2278 brw_dp_read_desc(devinfo, binding_table_index,
2279 per_sample << 5 | msg_subtype,
2280 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2281 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2282 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2283
2284 return insn;
2285 }
2286
2287 /**
2288 * Texture sample instruction.
2289 * Note: the msg_type plus msg_length values determine exactly what kind
2290 * of sampling operation is performed. See volume 4, page 161 of docs.
2291 */
2292 void brw_SAMPLE(struct brw_codegen *p,
2293 struct brw_reg dest,
2294 unsigned msg_reg_nr,
2295 struct brw_reg src0,
2296 unsigned binding_table_index,
2297 unsigned sampler,
2298 unsigned msg_type,
2299 unsigned response_length,
2300 unsigned msg_length,
2301 unsigned header_present,
2302 unsigned simd_mode,
2303 unsigned return_format)
2304 {
2305 const struct gen_device_info *devinfo = p->devinfo;
2306 brw_inst *insn;
2307
2308 if (msg_reg_nr != -1)
2309 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2310
2311 insn = next_insn(p, BRW_OPCODE_SEND);
2312 brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
2313 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2314
2315 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2316 *
2317 * "Instruction compression is not allowed for this instruction (that
2318 * is, send). The hardware behavior is undefined if this instruction is
2319 * set as compressed. However, compress control can be set to "SecHalf"
2320 * to affect the EMask generation."
2321 *
2322 * No similar wording is found in later PRMs, but there are examples
2323 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2324 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2325 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2326 */
2327 brw_inst_set_compression(devinfo, insn, false);
2328
2329 if (devinfo->gen < 6)
2330 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2331
2332 brw_set_dest(p, insn, dest);
2333 brw_set_src0(p, insn, src0);
2334 brw_set_desc(p, insn,
2335 brw_message_desc(devinfo, msg_length, response_length,
2336 header_present) |
2337 brw_sampler_desc(devinfo, binding_table_index, sampler,
2338 msg_type, simd_mode, return_format));
2339 }
2340
2341 /* Adjust the message header's sampler state pointer to
2342 * select the correct group of 16 samplers.
2343 */
2344 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2345 struct brw_reg header,
2346 struct brw_reg sampler_index)
2347 {
2348 /* The "Sampler Index" field can only store values between 0 and 15.
2349 * However, we can add an offset to the "Sampler State Pointer"
2350 * field, effectively selecting a different set of 16 samplers.
2351 *
2352 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2353 * offset, and each sampler state is only 16-bytes, so we can't
2354 * exclusively use the offset - we have to use both.
2355 */
2356
2357 const struct gen_device_info *devinfo = p->devinfo;
2358
2359 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2360 const int sampler_state_size = 16; /* 16 bytes */
2361 uint32_t sampler = sampler_index.ud;
2362
2363 if (sampler >= 16) {
2364 assert(devinfo->is_haswell || devinfo->gen >= 8);
2365 brw_ADD(p,
2366 get_element_ud(header, 3),
2367 get_element_ud(brw_vec8_grf(0, 0), 3),
2368 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2369 }
2370 } else {
2371 /* Non-const sampler array indexing case */
2372 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2373 return;
2374 }
2375
2376 struct brw_reg temp = get_element_ud(header, 3);
2377
2378 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2379 brw_SHL(p, temp, temp, brw_imm_ud(4));
2380 brw_ADD(p,
2381 get_element_ud(header, 3),
2382 get_element_ud(brw_vec8_grf(0, 0), 3),
2383 temp);
2384 }
2385 }
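
/* Worked example: an immediate sampler index of 20 makes the code above add
 * 16 * (20 / 16) * 16 == 256 bytes to the "Sampler State Pointer" (a
 * 32-byte-aligned offset, as required), and the low four bits of the index
 * (20 % 16 == 4) select the sampler within that group of 16.
 */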
2386
2387 /* All these variables are pretty confusing - we might be better off
2388 * using bitmasks and macros for this, in the old style. Or perhaps
2389 * just having the caller instantiate the fields in dword3 itself.
2390 */
2391 void brw_urb_WRITE(struct brw_codegen *p,
2392 struct brw_reg dest,
2393 unsigned msg_reg_nr,
2394 struct brw_reg src0,
2395 enum brw_urb_write_flags flags,
2396 unsigned msg_length,
2397 unsigned response_length,
2398 unsigned offset,
2399 unsigned swizzle)
2400 {
2401 const struct gen_device_info *devinfo = p->devinfo;
2402 brw_inst *insn;
2403
2404 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2405
2406 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2407 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2408 brw_push_insn_state(p);
2409 brw_set_default_access_mode(p, BRW_ALIGN_1);
2410 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2411 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2412 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2413 BRW_REGISTER_TYPE_UD),
2414 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2415 brw_imm_ud(0xff00));
2416 brw_pop_insn_state(p);
2417 }
2418
2419 insn = next_insn(p, BRW_OPCODE_SEND);
2420
2421 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2422
2423 brw_set_dest(p, insn, dest);
2424 brw_set_src0(p, insn, src0);
2425 brw_set_src1(p, insn, brw_imm_d(0));
2426
2427 if (devinfo->gen < 6)
2428 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2429
2430 brw_set_urb_message(p,
2431 insn,
2432 flags,
2433 msg_length,
2434 response_length,
2435 offset,
2436 swizzle);
2437 }
2438
2439 void
2440 brw_send_indirect_message(struct brw_codegen *p,
2441 unsigned sfid,
2442 struct brw_reg dst,
2443 struct brw_reg payload,
2444 struct brw_reg desc,
2445 unsigned desc_imm)
2446 {
2447 const struct gen_device_info *devinfo = p->devinfo;
2448 struct brw_inst *send;
2449
2450 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2451
2452 assert(desc.type == BRW_REGISTER_TYPE_UD);
2453
2454 if (desc.file == BRW_IMMEDIATE_VALUE) {
2455 send = next_insn(p, BRW_OPCODE_SEND);
2456 brw_set_desc(p, send, desc.ud | desc_imm);
2457
2458 } else {
2459 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2460
2461 brw_push_insn_state(p);
2462 brw_set_default_access_mode(p, BRW_ALIGN_1);
2463 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2464 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2465 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2466
2467 /* Load the indirect descriptor to an address register using OR so the
2468 * caller can specify additional descriptor bits with the desc_imm
2469 * immediate.
2470 */
2471 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2472
2473 brw_pop_insn_state(p);
2474
2475 send = next_insn(p, BRW_OPCODE_SEND);
2476 brw_set_src1(p, send, addr);
2477 }
2478
2479 if (dst.width < BRW_EXECUTE_8)
2480 brw_inst_set_exec_size(devinfo, send, dst.width);
2481
2482 brw_set_dest(p, send, dst);
2483 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2484 brw_inst_set_sfid(devinfo, send, sfid);
2485 }
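
/* Usage sketch with hypothetical values: an immediate descriptor takes the
 * plain SEND path above, while a descriptor held in a GRF is first ORed
 * with desc_imm into address register a0.0 ("sampler_desc_imm" here is a
 * made-up immediate):
 *
 *    brw_send_indirect_message(p, BRW_SFID_SAMPLER, dst, payload,
 *                              brw_imm_ud(0), sampler_desc_imm);
 */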
2486
2487 static void
2488 brw_send_indirect_surface_message(struct brw_codegen *p,
2489 unsigned sfid,
2490 struct brw_reg dst,
2491 struct brw_reg payload,
2492 struct brw_reg surface,
2493 unsigned desc_imm)
2494 {
2495 if (surface.file != BRW_IMMEDIATE_VALUE) {
2496 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2497
2498 brw_push_insn_state(p);
2499 brw_set_default_access_mode(p, BRW_ALIGN_1);
2500 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2501 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2502 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2503
2504 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2505 * some surface array is accessed out of bounds.
2506 */
2507 brw_AND(p, addr,
2508 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2509 BRW_GET_SWZ(surface.swizzle, 0)),
2510 brw_imm_ud(0xff));
2511
2512 brw_pop_insn_state(p);
2513
2514 surface = addr;
2515 }
2516
2517 brw_send_indirect_message(p, sfid, dst, payload, surface, desc_imm);
2518 }
2519
2520 static bool
2521 while_jumps_before_offset(const struct gen_device_info *devinfo,
2522 brw_inst *insn, int while_offset, int start_offset)
2523 {
2524 int scale = 16 / brw_jump_scale(devinfo);
2525 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2526 : brw_inst_jip(devinfo, insn);
2527 assert(jip < 0);
2528 return while_offset + jip * scale <= start_offset;
2529 }
2530
2531
2532 static int
2533 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2534 {
2535 int offset;
2536 void *store = p->store;
2537 const struct gen_device_info *devinfo = p->devinfo;
2538
2539 int depth = 0;
2540
2541 for (offset = next_offset(devinfo, store, start_offset);
2542 offset < p->next_insn_offset;
2543 offset = next_offset(devinfo, store, offset)) {
2544 brw_inst *insn = store + offset;
2545
2546 switch (brw_inst_opcode(devinfo, insn)) {
2547 case BRW_OPCODE_IF:
2548 depth++;
2549 break;
2550 case BRW_OPCODE_ENDIF:
2551 if (depth == 0)
2552 return offset;
2553 depth--;
2554 break;
2555 case BRW_OPCODE_WHILE:
2556 /* If the while doesn't jump before our instruction, it's the end
2557 * of a sibling do...while loop. Ignore it.
2558 */
2559 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2560 continue;
2561 /* fallthrough */
2562 case BRW_OPCODE_ELSE:
2563 case BRW_OPCODE_HALT:
2564 if (depth == 0)
2565 return offset;
2566 }
2567 }
2568
2569 return 0;
2570 }
2571
2572 /* There is no DO instruction on gen6, so to find the end of the loop
2573 * we have to see if the loop is jumping back before our start
2574 * instruction.
2575 */
2576 static int
2577 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2578 {
2579 const struct gen_device_info *devinfo = p->devinfo;
2580 int offset;
2581 void *store = p->store;
2582
2583 assert(devinfo->gen >= 6);
2584
2585 /* Always start after the instruction (such as a WHILE) we're trying to fix
2586 * up.
2587 */
2588 for (offset = next_offset(devinfo, store, start_offset);
2589 offset < p->next_insn_offset;
2590 offset = next_offset(devinfo, store, offset)) {
2591 brw_inst *insn = store + offset;
2592
2593 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2594 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2595 return offset;
2596 }
2597 }
2598 assert(!"not reached");
2599 return start_offset;
2600 }
2601
2602 /* After program generation, go back and update the UIP and JIP of
2603 * BREAK, CONT, and HALT instructions to their correct locations.
2604 */
2605 void
2606 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2607 {
2608 const struct gen_device_info *devinfo = p->devinfo;
2609 int offset;
2610 int br = brw_jump_scale(devinfo);
2611 int scale = 16 / br;
2612 void *store = p->store;
2613
2614 if (devinfo->gen < 6)
2615 return;
2616
2617 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2618 brw_inst *insn = store + offset;
2619 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2620
2621 int block_end_offset = brw_find_next_block_end(p, offset);
2622 switch (brw_inst_opcode(devinfo, insn)) {
2623 case BRW_OPCODE_BREAK:
2624 assert(block_end_offset != 0);
2625 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2626 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2627 brw_inst_set_uip(devinfo, insn,
2628 (brw_find_loop_end(p, offset) - offset +
2629 (devinfo->gen == 6 ? 16 : 0)) / scale);
2630 break;
2631 case BRW_OPCODE_CONTINUE:
2632 assert(block_end_offset != 0);
2633 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2634 brw_inst_set_uip(devinfo, insn,
2635 (brw_find_loop_end(p, offset) - offset) / scale);
2636
2637 assert(brw_inst_uip(devinfo, insn) != 0);
2638 assert(brw_inst_jip(devinfo, insn) != 0);
2639 break;
2640
2641 case BRW_OPCODE_ENDIF: {
2642 int32_t jump = (block_end_offset == 0) ?
2643 1 * br : (block_end_offset - offset) / scale;
2644 if (devinfo->gen >= 7)
2645 brw_inst_set_jip(devinfo, insn, jump);
2646 else
2647 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2648 break;
2649 }
2650
2651 case BRW_OPCODE_HALT:
2652 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2653 *
2654 * "In case of the halt instruction not inside any conditional
2655 * code block, the value of <JIP> and <UIP> should be the
2656 * same. In case of the halt instruction inside conditional code
2657 * block, the <UIP> should be the end of the program, and the
2658 * <JIP> should be end of the most inner conditional code block."
2659 *
2660 * The uip will have already been set by whoever set up the
2661 * instruction.
2662 */
2663 if (block_end_offset == 0) {
2664 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2665 } else {
2666 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2667 }
2668 assert(brw_inst_uip(devinfo, insn) != 0);
2669 assert(brw_inst_jip(devinfo, insn) != 0);
2670 break;
2671 }
2672 }
2673 }
2674
2675 void brw_ff_sync(struct brw_codegen *p,
2676 struct brw_reg dest,
2677 unsigned msg_reg_nr,
2678 struct brw_reg src0,
2679 bool allocate,
2680 unsigned response_length,
2681 bool eot)
2682 {
2683 const struct gen_device_info *devinfo = p->devinfo;
2684 brw_inst *insn;
2685
2686 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2687
2688 insn = next_insn(p, BRW_OPCODE_SEND);
2689 brw_set_dest(p, insn, dest);
2690 brw_set_src0(p, insn, src0);
2691 brw_set_src1(p, insn, brw_imm_d(0));
2692
2693 if (devinfo->gen < 6)
2694 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2695
2696 brw_set_ff_sync_message(p,
2697 insn,
2698 allocate,
2699 response_length,
2700 eot);
2701 }
2702
2703 /**
2704 * Emit the SEND instruction necessary to generate stream output data on Gen6
2705 * (for transform feedback).
2706 *
2707 * If send_commit_msg is true, this is the last piece of stream output data
2708 * from this thread, so send the data as a committed write. According to the
2709 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2710 *
2711 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2712 * writes are complete by sending the final write as a committed write."
2713 */
2714 void
2715 brw_svb_write(struct brw_codegen *p,
2716 struct brw_reg dest,
2717 unsigned msg_reg_nr,
2718 struct brw_reg src0,
2719 unsigned binding_table_index,
2720 bool send_commit_msg)
2721 {
2722 const struct gen_device_info *devinfo = p->devinfo;
2723 const unsigned target_cache =
2724 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2725 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2726 BRW_SFID_DATAPORT_WRITE);
2727 brw_inst *insn;
2728
2729 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2730
2731 insn = next_insn(p, BRW_OPCODE_SEND);
2732 brw_inst_set_sfid(devinfo, insn, target_cache);
2733 brw_set_dest(p, insn, dest);
2734 brw_set_src0(p, insn, src0);
2735 brw_set_desc(p, insn,
2736 brw_message_desc(devinfo, 1, send_commit_msg, true) |
2737 brw_dp_write_desc(devinfo, binding_table_index,
2738 0, /* msg_control: ignored */
2739 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2740 0, /* last_render_target: ignored */
2741 send_commit_msg)); /* send_commit_msg */
2742 }
2743
2744 static unsigned
2745 brw_surface_payload_size(struct brw_codegen *p,
2746 unsigned num_channels,
2747 bool has_simd4x2,
2748 bool has_simd16)
2749 {
2750 if (has_simd4x2 && brw_get_default_access_mode(p) == BRW_ALIGN_16)
2751 return 1;
2752 else if (has_simd16 && brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2753 return 2 * num_channels;
2754 else
2755 return num_channels;
2756 }
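
/* Worked examples (hedged): a SIMD16 untyped read of four channels returns
 * 2 * 4 == 8 GRFs, the same read in SIMD8 returns 4 GRFs, and an Align16
 * SIMD4x2 message returns a single GRF regardless of channel count.
 */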
2757
2758 static uint32_t
2759 brw_dp_untyped_atomic_desc(struct brw_codegen *p,
2760 unsigned atomic_op,
2761 bool response_expected)
2762 {
2763 const struct gen_device_info *devinfo = p->devinfo;
2764 unsigned msg_control =
2765 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2766 (response_expected ? 1 << 5 : 0); /* Return data expected */
2767 unsigned msg_type;
2768
2769 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2770 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2771 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2772 msg_control |= 1 << 4; /* SIMD8 mode */
2773
2774 msg_type = HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP;
2775 } else {
2776 msg_type = HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2;
2777 }
2778 } else {
2779 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2780 msg_control |= 1 << 4; /* SIMD8 mode */
2781
2782 msg_type = GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP;
2783 }
2784
2785 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2786 }
2787
2788 void
2789 brw_untyped_atomic(struct brw_codegen *p,
2790 struct brw_reg dst,
2791 struct brw_reg payload,
2792 struct brw_reg surface,
2793 unsigned atomic_op,
2794 unsigned msg_length,
2795 bool response_expected,
2796 bool header_present)
2797 {
2798 const struct gen_device_info *devinfo = p->devinfo;
2799 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2800 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2801 GEN7_SFID_DATAPORT_DATA_CACHE);
2802 const unsigned response_length = brw_surface_payload_size(
2803 p, response_expected, devinfo->gen >= 8 || devinfo->is_haswell, true);
2804 const unsigned desc =
2805 brw_message_desc(devinfo, msg_length, response_length, header_present) |
2806 brw_dp_untyped_atomic_desc(p, atomic_op, response_expected);
2807 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2808 /* Mask out unused components -- This is especially important in Align16
2809 * mode on generations that don't have native support for SIMD4x2 atomics,
2810 * because unused but enabled components will cause the dataport to perform
2811 * additional atomic operations on the addresses that happen to be in the
2812 * uninitialized Y, Z and W coordinates of the payload.
2813 */
2814 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2815
2816 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
2817 payload, surface, desc);
2818 }
2819
2820 static uint32_t
2821 brw_dp_untyped_atomic_float_desc(struct brw_codegen *p,
2822 unsigned atomic_op,
2823 bool response_expected)
2824 {
2825 const struct gen_device_info *devinfo = p->devinfo;
2826 const unsigned msg_type = GEN9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP;
2827 unsigned msg_control =
2828 atomic_op | /* Atomic Operation Type: BRW_AOP_F* */
2829 (response_expected ? 1 << 5 : 0); /* Return data expected */
2830
2831 assert(devinfo->gen >= 9);
2832 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2833
2834 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2835 msg_control |= 1 << 4; /* SIMD8 mode */
2836
2837 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2838 }
2839
2840 void
2841 brw_untyped_atomic_float(struct brw_codegen *p,
2842 struct brw_reg dst,
2843 struct brw_reg payload,
2844 struct brw_reg surface,
2845 unsigned atomic_op,
2846 unsigned msg_length,
2847 bool response_expected,
2848 bool header_present)
2849 {
2850 const struct gen_device_info *devinfo = p->devinfo;
2851
2852 assert(devinfo->gen >= 9);
2853 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2854
2855 const unsigned sfid = HSW_SFID_DATAPORT_DATA_CACHE_1;
2856 const unsigned response_length = brw_surface_payload_size(
2857 p, response_expected, true, true);
2858 const unsigned desc =
2859 brw_message_desc(devinfo, msg_length, response_length, header_present) |
2860 brw_dp_untyped_atomic_float_desc(p, atomic_op, response_expected);
2861
2862 brw_send_indirect_surface_message(p, sfid,
2863 brw_writemask(dst, WRITEMASK_XYZW),
2864 payload, surface, desc);
2865 }
2866
2867 static uint32_t
2868 brw_dp_untyped_surface_read_desc(struct brw_codegen *p,
2869 unsigned num_channels)
2870 {
2871 const struct gen_device_info *devinfo = p->devinfo;
2872 const unsigned msg_type = (devinfo->gen >= 8 || devinfo->is_haswell ?
2873 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2874 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ);
2875 /* Set mask of 32-bit channels to drop. */
2876 unsigned msg_control = 0xf & (0xf << num_channels);
2877
2878 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2879 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2880 msg_control |= 1 << 4; /* SIMD16 mode */
2881 else
2882 msg_control |= 2 << 4; /* SIMD8 mode */
2883 }
2884
2885 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2886 }
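
/* Worked example: num_channels == 2 yields msg_control = 0xf & (0xf << 2)
 * == 0xc, dropping the Z and W channels so only X and Y are returned;
 * Align1 SIMD16 then sets bit 4 (1 << 4) and SIMD8 sets bit 5 (2 << 4).
 */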
2887
2888 void
2889 brw_untyped_surface_read(struct brw_codegen *p,
2890 struct brw_reg dst,
2891 struct brw_reg payload,
2892 struct brw_reg surface,
2893 unsigned msg_length,
2894 unsigned num_channels)
2895 {
2896 const struct gen_device_info *devinfo = p->devinfo;
2897 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2898 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2899 GEN7_SFID_DATAPORT_DATA_CACHE);
2900 const unsigned response_length =
2901 brw_surface_payload_size(p, num_channels, true, true);
2902 const unsigned desc =
2903 brw_message_desc(devinfo, msg_length, response_length, false) |
2904 brw_dp_untyped_surface_read_desc(p, num_channels);
2905
2906 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
2907 }
2908
2909 static uint32_t
2910 brw_dp_untyped_surface_write_desc(struct brw_codegen *p,
2911 unsigned num_channels)
2912 {
2913 const struct gen_device_info *devinfo = p->devinfo;
2914 const unsigned msg_type = (devinfo->gen >= 8 || devinfo->is_haswell ?
2915 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
2916 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
2917 /* Set mask of 32-bit channels to drop. */
2918 unsigned msg_control = 0xf & (0xf << num_channels);
2919
2920 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2921 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2922 msg_control |= 1 << 4; /* SIMD16 mode */
2923 else
2924 msg_control |= 2 << 4; /* SIMD8 mode */
2925 } else {
2926 if (devinfo->gen >= 8 || devinfo->is_haswell)
2927 msg_control |= 0 << 4; /* SIMD4x2 mode */
2928 else
2929 msg_control |= 2 << 4; /* SIMD8 mode */
2930 }
2931
2932 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2933 }
2934
2935 void
2936 brw_untyped_surface_write(struct brw_codegen *p,
2937 struct brw_reg payload,
2938 struct brw_reg surface,
2939 unsigned msg_length,
2940 unsigned num_channels,
2941 bool header_present)
2942 {
2943 const struct gen_device_info *devinfo = p->devinfo;
2944 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2945 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2946 GEN7_SFID_DATAPORT_DATA_CACHE);
2947 const unsigned desc =
2948 brw_message_desc(devinfo, msg_length, 0, header_present) |
2949 brw_dp_untyped_surface_write_desc(p, num_channels);
2950 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2951 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2952 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
2953 WRITEMASK_X : WRITEMASK_XYZW;
2954
2955 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
2956 payload, surface, desc);
2957 }
2958
2959 static unsigned
2960 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
2961 {
2962 switch (bit_size) {
2963 case 8:
2964 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
2965 case 16:
2966 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
2967 case 32:
2968 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
2969 default:
2970 unreachable("Unsupported bit_size for byte scattered messages");
2971 }
2972 }
2973
2974 static uint32_t
2975 brw_dp_byte_scattered_desc(struct brw_codegen *p, unsigned bit_size,
2976 unsigned msg_type)
2977 {
2978 const struct gen_device_info *devinfo = p->devinfo;
2979 unsigned msg_control =
2980 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
2981
2982 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2983 msg_control |= 1; /* SIMD16 mode */
2984 else
2985 msg_control |= 0; /* SIMD8 mode */
2986
2987 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2988 }
2989
2990 void
2991 brw_byte_scattered_read(struct brw_codegen *p,
2992 struct brw_reg dst,
2993 struct brw_reg payload,
2994 struct brw_reg surface,
2995 unsigned msg_length,
2996 unsigned bit_size)
2997 {
2998 const struct gen_device_info *devinfo = p->devinfo;
2999 assert(devinfo->gen > 7 || devinfo->is_haswell);
3000 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3001 const unsigned response_length =
3002 brw_surface_payload_size(p, 1, true, true);
3003 const unsigned desc =
3004 brw_message_desc(devinfo, msg_length, response_length, false) |
3005 brw_dp_byte_scattered_desc(p, bit_size,
3006 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
3007
3008 brw_send_indirect_surface_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
3009 dst, payload, surface, desc);
3010 }
3011
3012 void
3013 brw_byte_scattered_write(struct brw_codegen *p,
3014 struct brw_reg payload,
3015 struct brw_reg surface,
3016 unsigned msg_length,
3017 unsigned bit_size,
3018 bool header_present)
3019 {
3020 const struct gen_device_info *devinfo = p->devinfo;
3021 assert(devinfo->gen > 7 || devinfo->is_haswell);
3022 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3023 const unsigned desc =
3024 brw_message_desc(devinfo, msg_length, 0, header_present) |
3025 brw_dp_byte_scattered_desc(p, bit_size,
3026 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
3027
3028 brw_send_indirect_surface_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
3029 brw_writemask(brw_null_reg(),
3030 WRITEMASK_XYZW),
3031 payload, surface, desc);
3032 }
3033
3034 static uint32_t
3035 brw_dp_typed_atomic_desc(struct brw_codegen *p,
3036 unsigned atomic_op,
3037 bool response_expected)
3038 {
3039 const struct gen_device_info *devinfo = p->devinfo;
3040 unsigned msg_control =
3041 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3042 (response_expected ? 1 << 5 : 0); /* Return data expected */
3043 unsigned msg_type;
3044
3045 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3046 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3047 if ((brw_get_default_group(p) / 8) % 2 == 1)
3048 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3049
3050 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP;
3051 } else {
3052 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2;
3053 }
3054
3055 } else {
3056 if ((brw_get_default_group(p) / 8) % 2 == 1)
3057 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3058
3059 msg_type = GEN7_DATAPORT_RC_TYPED_ATOMIC_OP;
3060 }
3061
3062 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3063 }
3064
3065 void
3066 brw_typed_atomic(struct brw_codegen *p,
3067 struct brw_reg dst,
3068 struct brw_reg payload,
3069 struct brw_reg surface,
3070 unsigned atomic_op,
3071 unsigned msg_length,
3072 bool response_expected,
3073 bool header_present) {
3074 const struct gen_device_info *devinfo = p->devinfo;
3075 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3076 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3077 GEN6_SFID_DATAPORT_RENDER_CACHE);
3078 const unsigned response_length = brw_surface_payload_size(
3079 p, response_expected, devinfo->gen >= 8 || devinfo->is_haswell, false);
3080 const unsigned desc =
3081 brw_message_desc(devinfo, msg_length, response_length, header_present) |
3082 brw_dp_typed_atomic_desc(p, atomic_op, response_expected);
3083 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3084 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3085 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3086
3087 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
3088 payload, surface, desc);
3089 }
3090
3091 static uint32_t
3092 brw_dp_typed_surface_read_desc(struct brw_codegen *p,
3093 unsigned num_channels)
3094 {
3095 const struct gen_device_info *devinfo = p->devinfo;
3096 /* Set mask of unused channels. */
3097 unsigned msg_control = 0xf & (0xf << num_channels);
3098 unsigned msg_type;
3099
3100 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3101 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3102 if ((brw_get_default_group(p) / 8) % 2 == 1)
3103 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3104 else
3105 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3106 }
3107
3108 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ;
3109 } else {
3110 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3111 if ((brw_get_default_group(p) / 8) % 2 == 1)
3112 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3113 }
3114
3115 msg_type = GEN7_DATAPORT_RC_TYPED_SURFACE_READ;
3116 }
3117
3118 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3119 }
3120
3121 void
3122 brw_typed_surface_read(struct brw_codegen *p,
3123 struct brw_reg dst,
3124 struct brw_reg payload,
3125 struct brw_reg surface,
3126 unsigned msg_length,
3127 unsigned num_channels,
3128 bool header_present)
3129 {
3130 const struct gen_device_info *devinfo = p->devinfo;
3131 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3132 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3133 GEN6_SFID_DATAPORT_RENDER_CACHE);
3134 const unsigned response_length = brw_surface_payload_size(
3135 p, num_channels, devinfo->gen >= 8 || devinfo->is_haswell, false);
3136 const unsigned desc =
3137 brw_message_desc(devinfo, msg_length, response_length, header_present) |
3138 brw_dp_typed_surface_read_desc(p, num_channels);
3139
3140 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
3141 }
3142
3143 static uint32_t
3144 brw_dp_typed_surface_write_desc(struct brw_codegen *p,
3145 unsigned num_channels)
3146 {
3147 const struct gen_device_info *devinfo = p->devinfo;
3148 /* Set mask of unused channels. */
3149 unsigned msg_control = 0xf & (0xf << num_channels);
3150 unsigned msg_type;
3151
3152 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3153 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3154 if ((brw_get_default_group(p) / 8) % 2 == 1)
3155 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3156 else
3157 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3158 }
3159
3160 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE;
3161
3162 } else {
3163 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3164 if ((brw_get_default_group(p) / 8) % 2 == 1)
3165 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3166 }
3167
3168 msg_type = GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE;
3169 }
3170
3171 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3172 }
3173
3174 void
3175 brw_typed_surface_write(struct brw_codegen *p,
3176 struct brw_reg payload,
3177 struct brw_reg surface,
3178 unsigned msg_length,
3179 unsigned num_channels,
3180 bool header_present)
3181 {
3182 const struct gen_device_info *devinfo = p->devinfo;
3183 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3184 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3185 GEN6_SFID_DATAPORT_RENDER_CACHE);
3186 const unsigned desc =
3187 brw_message_desc(devinfo, msg_length, 0, header_present) |
3188 brw_dp_typed_surface_write_desc(p, num_channels);
3189 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3190 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3191 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3192 WRITEMASK_X : WRITEMASK_XYZW);
3193
3194 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
3195 payload, surface, desc);
3196 }
3197
3198 static void
3199 brw_set_memory_fence_message(struct brw_codegen *p,
3200 struct brw_inst *insn,
3201 enum brw_message_target sfid,
3202 bool commit_enable)
3203 {
3204 const struct gen_device_info *devinfo = p->devinfo;
3205
3206 brw_set_desc(p, insn, brw_message_desc(
3207 devinfo, 1, (commit_enable ? 1 : 0), true));
3208
3209 brw_inst_set_sfid(devinfo, insn, sfid);
3210
3211 switch (sfid) {
3212 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3213 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3214 break;
3215 case GEN7_SFID_DATAPORT_DATA_CACHE:
3216 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3217 break;
3218 default:
3219 unreachable("Not reached");
3220 }
3221
3222 if (commit_enable)
3223 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3224 }
3225
3226 void
3227 brw_memory_fence(struct brw_codegen *p,
3228 struct brw_reg dst,
3229 enum opcode send_op)
3230 {
3231 const struct gen_device_info *devinfo = p->devinfo;
3232 const bool commit_enable =
3233 devinfo->gen >= 10 || /* HSD ES # 1404612949 */
3234 (devinfo->gen == 7 && !devinfo->is_haswell);
3235 struct brw_inst *insn;
3236
3237 brw_push_insn_state(p);
3238 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3239 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3240 dst = vec1(dst);
3241
3242    /* Set dst as the destination for dependency tracking; the MEMORY_FENCE
3243     * message doesn't write anything back.
3244 */
3245 insn = next_insn(p, send_op);
3246 dst = retype(dst, BRW_REGISTER_TYPE_UW);
3247 brw_set_dest(p, insn, dst);
3248 brw_set_src0(p, insn, dst);
3249 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3250 commit_enable);
3251
3252 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3253 /* IVB does typed surface access through the render cache, so we need to
3254 * flush it too. Use a different register so both flushes can be
3255 * pipelined by the hardware.
3256 */
3257 insn = next_insn(p, send_op);
3258 brw_set_dest(p, insn, offset(dst, 1));
3259 brw_set_src0(p, insn, offset(dst, 1));
3260 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3261 commit_enable);
3262
3263 /* Now write the response of the second message into the response of the
3264 * first to trigger a pipeline stall -- This way future render and data
3265 * cache messages will be properly ordered with respect to past data and
3266 * render cache messages.
3267 */
3268 brw_MOV(p, dst, offset(dst, 1));
3269 }
3270
3271 brw_pop_insn_state(p);
3272 }
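
/* A minimal usage sketch, assuming g10 is a free scratch register and that
 * the plain SEND opcode is wanted:
 *
 *    brw_memory_fence(p, retype(brw_vec1_grf(10, 0), BRW_REGISTER_TYPE_UD),
 *                     BRW_OPCODE_SEND);
 *
 * Because the fence names its own destination for dependency-tracking
 * purposes, a later instruction reading g10.0 will stall until the fence
 * (and, on IVB, the extra render cache flush) has completed.
 */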

void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const uint16_t exec_size = brw_get_default_exec_size(p);
   const unsigned slot_group = brw_get_default_group(p) / 16;
   const unsigned simd_mode = (exec_size == BRW_EXECUTE_16);
   const unsigned desc =
      brw_message_desc(devinfo, msg_length, response_length, false) |
      brw_pixel_interp_desc(devinfo, mode, noperspective, simd_mode,
                            slot_group);

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually immediate.
    */
   brw_send_indirect_message(p,
                             GEN7_SFID_PIXEL_INTERPOLATOR,
                             dest,
                             mrf,
                             vec1(data),
                             desc);
}
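
/* A hypothetical SIMD8 sketch (register numbers and lengths are assumed,
 * not taken from a real caller): query perspective-correct barycentrics at
 * the centroid, with no extra message data, returning two registers of
 * deltas starting at g20:
 *
 *    brw_pixel_interpolator_query(p, brw_vec8_grf(20, 0), brw_vec8_grf(2, 0),
 *                                 false, GEN7_PIXEL_INTERPOLATOR_LOC_CENTROID,
 *                                 brw_imm_ud(0), 1, 2);
 */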

void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_get_default_exec_size(p);
   const unsigned qtr_control = brw_get_default_group(p) / 8;
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: just
          * find the first bit set in the execution mask.  The register
          * exists on HSW already but it reads back as all ones when the
          * current instruction has execution masking disabled, so it's
          * kind of useless there.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's not
             * tightly packed (i.e. it doesn't have the form '2^n - 1' for
             * some n).  Combine ce0 with the given dispatch (or vector) mask
             * to mask off those channels which were never dispatched by the
             * hardware.
             */
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }

         /* Quarter control has the effect of magically shifting the value of
          * ce0 so you'll get the first active channel relative to the
          * specified quarter control as result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
      } else {
         const struct brw_reg flag = brw_flag_reg(p->current->flag_subreg / 2,
                                                  p->current->flag_subreg % 2);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking and
          * a conditional modifier enabled in order to get the full execution
          * mask in f1.0.  We could use a single 32-wide move here if it
          * weren't for the hardware bug that causes channel enables to be
          * applied incorrectly to the second half of 32-wide instructions on
          * Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }

         /* Find the first bit set in the exec_size-wide portion of the flag
          * register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.  Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination first without and then with execution
          * masking in order to find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}
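
/* A minimal sketch of the usual pairing with brw_broadcast() below
 * (hypothetical register numbers): find the index of the first live channel
 * and pull that channel's value out of a SIMD8 vector, e.g. to perform an
 * operation once per thread instead of once per channel:
 *
 *    struct brw_reg chan = retype(brw_vec1_grf(10, 0), BRW_REGISTER_TYPE_UD);
 *    brw_find_live_channel(p, chan, brw_dmask_reg());
 *    brw_broadcast(p, brw_vec1_grf(11, 0), brw_vec8_grf(12, 0), chan);
 */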

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset.  The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address.  Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, so account for the difference if the
          * source register is above this limit.
          */
         if (offset >= limit) {
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }
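
         /* Worked example of the adjustment above: for src.nr == 20 the
          * offset starts at 20 * REG_SIZE = 640 bytes, which exceeds the
          * 512 byte limit, so 512 is added to the address register once and
          * the remaining 128 bytes are left to the immediate.
          */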

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation is
             *    integer DWord multiply, indirect addressing must not be
             *    used."
             *
             * To work around both of these restrictions, we do two integer
             * MOVs instead of one 64-bit MOV.  Because no double value
             * should ever cross a register boundary, it's safe to use the
             * immediate offset in the indirect here to handle adding 4 bytes
             * to the offset and avoid the extra ADD to the register file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_vec1_indirect(addr.subnr, offset),
                           BRW_REGISTER_TYPE_D));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_vec1_indirect(addr.subnr, offset + 4),
                           BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can be either zero or one; replicate
          * it to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}
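
/* When the index is an immediate the function reduces to a single strided
 * MOV.  A hypothetical align1 example reading channel 3 of a SIMD8 float
 * vector in g12 into a scalar at g11:
 *
 *    brw_broadcast(p, brw_vec1_grf(11, 0), brw_vec8_grf(12, 0),
 *                  brw_imm_ud(3));
 */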

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we want to just
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_desc(p, send, (brw_message_desc(devinfo, 2, 0, false) |
                          brw_dp_untyped_atomic_desc(p, BRW_AOP_ADD, false)));

   brw_inst_set_sfid(devinfo, send, sfid);
   brw_inst_set_binding_table_index(devinfo, send, surf_index);

   brw_pop_insn_state(p);
}
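
/* Sketch of the expected call site (register and surface numbers assumed):
 * the payload is two registers long, presumably the buffer offset followed
 * by the value to add, matching the two-register untyped atomic ADD message
 * built above:
 *
 *    brw_shader_time_add(p, brw_vec8_grf(base_reg, 0), surf_index);
 *
 * where base_reg and surf_index come from the caller's shader_time setup.
 */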


/**
 * Emit the SEND message for a barrier.
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());
   brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier.
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}
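
/* The two functions above form the two halves of a barrier.  A sketch of
 * how a generator would emit one (the payload register r holding the
 * barrier ID is assumed to be set up by the caller):
 *
 *    brw_barrier(p, retype(brw_vec8_grf(r, 0), BRW_REGISTER_TYPE_UD));
 *    brw_WAIT(p);
 *
 * Every thread first signals the message gateway and then sleeps on the
 * notification register until the whole group has arrived.
 */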

/**
 * Changes the floating-point rounding mode by updating the rounding mode
 * field in the control register, cr0.0 bits [5:6].  The bitwise logic below
 * supports switching among all four modes: RTNE (00), RU (01), RD (10) and
 * RTZ (11), though only RTNE and RTZ are currently exposed through NIR.
 */
void
brw_rounding_mode(struct brw_codegen *p,
                  enum brw_rnd_mode mode)
{
   const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;

   if (bits != BRW_CR0_RND_MODE_MASK) {
      brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                               brw_imm_ud(~BRW_CR0_RND_MODE_MASK));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);

      /* From the Skylake PRM, Volume 7, page 760:
       *
       *    "Implementation Restriction on Register Access: When the control
       *    register is used as an explicit source and/or destination,
       *    hardware does not ensure execution pipeline coherency.  Software
       *    must set the thread control field to 'switch' for an instruction
       *    that uses control register as an explicit operand."
       */
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }

   if (bits) {
      brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                              brw_imm_ud(bits));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }
}
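
/* A short usage sketch: to emit a run of float instructions under
 * round-to-zero and then restore the default round-to-nearest-even mode:
 *
 *    brw_rounding_mode(p, BRW_RND_MODE_RTZ);
 *    (float instructions here)
 *    brw_rounding_mode(p, BRW_RND_MODE_RTNE);
 *
 * Since RTZ is the all-ones encoding the AND above is skipped for it, and
 * since RTNE is all zeros the OR is skipped, so each of these two common
 * switches costs a single cr0 write.
 */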