intel/eu/gen7: Fix brw_MOV() with DF destination and strided source.
src/intel/compiler/brw_eu_emit.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
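
/* Illustrative usage (a sketch, not from the original file): on gen6+, a
 * generator that built a SEND payload in a GRF resolves the implied move
 * first, so that src ends up pointing at the MRF the message reads from:
 *
 *    struct brw_reg payload = brw_vec8_grf(header_reg_nr, 0);
 *    gen6_resolve_implied_move(p, &payload, 1);
 *    // payload is now m1; pass it as src0 of the SEND
 *
 * "header_reg_nr" is a hypothetical GRF number chosen by the caller.
 */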

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          *    Although Dst.HorzStride is a don't care for Align16, HW needs
          *    this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Even though it is ignored in DA16 mode, HW still needs this
          * programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * In platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16.  In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}
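
/* Illustrative example (a sketch, not from the original file): with
 * automatic_exec_sizes enabled, a 1-wide destination narrows the
 * instruction to a single channel without an explicit exec-size override:
 *
 *    brw_MOV(p, vec1(brw_vec8_grf(dst_nr, 0)), brw_imm_ud(0));
 *    // brw_set_dest() saw dest.width == BRW_WIDTH_1 < BRW_EXECUTE_4,
 *    // so the MOV was emitted with an exec size of 1.
 *
 * "dst_nr" is a hypothetical GRF number chosen by the caller.
 */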

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of using the same region descriptions for
             * registers in align_16 as in align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}
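
/* Illustrative example (a sketch, not from the original file): on a SIMD1
 * instruction a 1-wide source always receives the scalar region <0;1,0>,
 * even if the caller left other strides set:
 *
 *    struct brw_reg s = stride(brw_vec8_grf(src_nr, 0), 8, 1, 1);
 *    brw_MOV(p, vec1(brw_vec8_grf(dst_nr, 0)), s);
 *    // with exec size 1, src0 is encoded as <0;1,0>, not <8;1,1>
 *
 * "src_nr"/"dst_nr" are hypothetical GRF numbers; the exec size of 1 comes
 * from automatic_exec_sizes or an explicit default.
 */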


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be an immediate in two-argument instructions. */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* Two-argument instructions can only use 32-bit immediates. */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert(reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of using the same region descriptions for
             * registers in align_16 as in align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Specify the descriptor and extended descriptor immediate for a SEND(C)
 * message instruction.
 */
void
brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
                unsigned desc, unsigned ex_desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC);
   brw_inst_set_src1_file_type(devinfo, inst,
                               BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_UD);
   brw_inst_set_send_desc(devinfo, inst, desc);
   if (devinfo->gen >= 9)
      brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
}
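
/* Illustrative usage (a sketch, not from the original file): the message
 * setters below build the descriptor with brw_message_desc() and store it
 * through brw_set_desc(), a thin wrapper around brw_set_desc_ex() with
 * ex_desc == 0:
 *
 *    brw_set_desc(p, send, brw_message_desc(devinfo, mlen, rlen, true));
 *
 * "send", "mlen" and "rlen" stand in for a SEND instruction and its
 * message/response lengths.
 */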

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, mlen, rlen, header_present));

   brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
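
/* For reference (derived from the code above, not from the original file):
 * the Block Size field uses a different encoding per generation.
 *
 *    num_regs:            1  2  4  8
 *    block_size (gen7):   0  1  3  -   (num_regs - 1; 8 is not allowed)
 *    block_size (gen8+):  0  1  2  3   (log2 of num_regs)
 */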

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
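
/* Worked example (derived from the comment above, not from the original
 * file): a register at byte subnr 16 is component 4 for a 3-src
 * instruction, since 16 / 4 == 4; byte offsets that are not a multiple of
 * 4 cannot be expressed in this encoding.
 */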

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.nr < 128);
   assert(src1.nr < 128);
   assert(src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD);
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types.  The MAD and LRP emitters ensure
          * that all four types are float.  The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                          \
brw_inst *brw_##OP(struct brw_codegen *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0)                   \
{                                                         \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);       \
}

#define ALU2(OP)                                          \
brw_inst *brw_##OP(struct brw_codegen *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0,                   \
                   struct brw_reg src1)                   \
{                                                         \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1); \
}

#define ALU3(OP)                                          \
brw_inst *brw_##OP(struct brw_codegen *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0,                   \
                   struct brw_reg src1,                   \
                   struct brw_reg src2)                   \
{                                                         \
   if (p->current->access_mode == BRW_ALIGN_16) {         \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)          \
         src0.swizzle = BRW_SWIZZLE_XXXX;                 \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)          \
         src1.swizzle = BRW_SWIZZLE_XXXX;                 \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)          \
         src2.swizzle = BRW_SWIZZLE_XXXX;                 \
   }                                                      \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

#define ALU3F(OP)                                               \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                   \
          dest.type == BRW_REGISTER_TYPE_DF);                   \
   if (dest.type == BRW_REGISTER_TYPE_F) {                      \
      assert(src0.type == BRW_REGISTER_TYPE_F);                 \
      assert(src1.type == BRW_REGISTER_TYPE_F);                 \
      assert(src2.type == BRW_REGISTER_TYPE_F);                 \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {              \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                \
   }                                                            \
                                                                \
   if (p->current->access_mode == BRW_ALIGN_16) {               \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)                \
         src0.swizzle = BRW_SWIZZLE_XXXX;                       \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)                \
         src1.swizzle = BRW_SWIZZLE_XXXX;                       \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)                \
         src2.swizzle = BRW_SWIZZLE_XXXX;                       \
   }                                                            \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                  \
void brw_##OP(struct brw_codegen *p,                               \
              struct brw_reg dest,                                 \
              struct brw_reg src)                                  \
{                                                                  \
   const struct gen_device_info *devinfo = p->devinfo;             \
   brw_inst *rnd, *add;                                            \
   rnd = next_insn(p, BRW_OPCODE_##OP);                            \
   brw_set_dest(p, rnd, dest);                                     \
   brw_set_src0(p, rnd, src);                                      \
                                                                   \
   if (devinfo->gen < 6) {                                         \
      /* turn on round-increments */                               \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R); \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));               \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
   }                                                               \
}


ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)
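
/* Illustrative usage (a sketch, not from the original file): the macros
 * above expand into ordinary emitters, so the generated helpers are called
 * like any other:
 *
 *    brw_AND(p, dst, src0, src1);        // from ALU2(AND)
 *    brw_MAD(p, dst, src0, src1, src2);  // from ALU3(MAD)
 *    brw_RNDE(p, dst, src);              // from ROUND(RNDE); pre-gen6 this
 *                                        // emits RNDE plus a predicated ADD
 *
 * "dst"/"srcN" stand in for caller-built brw_reg operands.
 */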

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use an <X,2,0> source region to
    * read each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == src0.width + src0.hstride);
      src0.vstride = src0.hstride;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
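
/* Worked example (a sketch, not from the original file): converting packed
 * floats to doubles on IVB would normally read src0 as <8;8,1>.  The
 * workaround above rewrites that region to <1;2,0>, so each element is read
 * twice and the ignored odd channels do no harm:
 *
 *    struct brw_reg dst = retype(brw_vec8_grf(dst_nr, 0),
 *                                BRW_REGISTER_TYPE_DF);
 *    brw_MOV(p, dst, brw_vec8_grf(src_nr, 0));   // F source, <8;8,1>
 *    // emitted roughly as: mov(8) dst<1>:df src<1;2,0>:f
 *
 * "dst_nr"/"src_nr" are hypothetical GRF numbers.
 */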

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}
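
/* For reference (derived from the code above, not from the original file):
 * with a UD destination the conversion is emitted as a strided W-typed
 * write followed by a zero write to the odd halves, roughly, on gen8+:
 *
 *    mov(8) dst<2>:hf  src<8;8,1>:f   { NoDDClr }
 *    mov(8) dst.1<2>:w 0x0:w          { NoDDChk }
 *
 * The NoDDClr/NoDDChk pair marks the two instructions as writing disjoint
 * halves of the same register, so the dependency scoreboard treats them as
 * a single write.
 */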

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *    Because this instruction does not have a 16-bit floating-point
       *    type, the source data type must be Word (W).  The destination type
       *    must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}


/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

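/* Note (added for clarity, consistent with the comment in brw_ENDIF): the
 * if-stack stores offsets into p->store rather than brw_inst pointers,
 * because next_insn() may reralloc the store and move it.
 */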
static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, eg. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP.  On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /*
    * A single next_insn() may change the base address of the instruction
    * store memory (p->store), so call it first, before referencing the
    * instruction store pointer from an index.
    */
   if (emit_endif)
      insn = next_insn(p, BRW_OPCODE_ENDIF);

   /* Pop the IF and (optional) ELSE instructions from the stack */
   p->if_depth_in_loop[p->loop_stack_depth]--;
   tmp = pop_if_stack(p);
   if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
      else_inst = tmp;
      tmp = pop_if_stack(p);
   }
   if_inst = tmp;

   if (!emit_endif) {
      /* ENDIF is useless; don't bother emitting it. */
      convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
      return;
   }

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
   } else {
      brw_set_src0(p, insn, brw_imm_d(0));
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   /* Also pop item off the stack in the endif instruction: */
   if (devinfo->gen < 6) {
      brw_inst_set_gen4_jump_count(devinfo, insn, 0);
      brw_inst_set_gen4_pop_count(devinfo, insn, 1);
   } else if (devinfo->gen == 6) {
      brw_inst_set_gen6_jump_count(devinfo, insn, 2);
   } else {
      brw_inst_set_jip(devinfo, insn, 2);
   }
   patch_IF_ELSE(p, if_inst, else_inst, insn);
}
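
/* Illustrative usage (a sketch, not from the original file): a generator
 * emits structured control flow as matched calls; the jump offsets are
 * filled in when brw_ENDIF() calls patch_IF_ELSE():
 *
 *    brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ, cond_reg, brw_imm_ud(0));
 *    brw_IF(p, BRW_EXECUTE_8);
 *    ...                          // "then" instructions
 *    brw_ELSE(p);
 *    ...                          // "else" instructions
 *    brw_ENDIF(p);
 *
 * brw_CMP() is assumed from elsewhere in this API and "cond_reg" is a
 * hypothetical flag-setting operand; flag-register setup is omitted.
 */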
1549
1550 brw_inst *
1551 brw_BREAK(struct brw_codegen *p)
1552 {
1553 const struct gen_device_info *devinfo = p->devinfo;
1554 brw_inst *insn;
1555
1556 insn = next_insn(p, BRW_OPCODE_BREAK);
1557 if (devinfo->gen >= 8) {
1558 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1559 brw_set_src0(p, insn, brw_imm_d(0x0));
1560 } else if (devinfo->gen >= 6) {
1561 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1562 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1563 brw_set_src1(p, insn, brw_imm_d(0x0));
1564 } else {
1565 brw_set_dest(p, insn, brw_ip_reg());
1566 brw_set_src0(p, insn, brw_ip_reg());
1567 brw_set_src1(p, insn, brw_imm_d(0x0));
1568 brw_inst_set_gen4_pop_count(devinfo, insn,
1569 p->if_depth_in_loop[p->loop_stack_depth]);
1570 }
1571 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1572 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1573
1574 return insn;
1575 }
1576
1577 brw_inst *
1578 brw_CONT(struct brw_codegen *p)
1579 {
1580 const struct gen_device_info *devinfo = p->devinfo;
1581 brw_inst *insn;
1582
1583 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1584 brw_set_dest(p, insn, brw_ip_reg());
1585 if (devinfo->gen >= 8) {
1586 brw_set_src0(p, insn, brw_imm_d(0x0));
1587 } else {
1588 brw_set_src0(p, insn, brw_ip_reg());
1589 brw_set_src1(p, insn, brw_imm_d(0x0));
1590 }
1591
1592 if (devinfo->gen < 6) {
1593 brw_inst_set_gen4_pop_count(devinfo, insn,
1594 p->if_depth_in_loop[p->loop_stack_depth]);
1595 }
1596 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1597 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1598 return insn;
1599 }
1600
1601 brw_inst *
1602 gen6_HALT(struct brw_codegen *p)
1603 {
1604 const struct gen_device_info *devinfo = p->devinfo;
1605 brw_inst *insn;
1606
1607 insn = next_insn(p, BRW_OPCODE_HALT);
1608 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1609 if (devinfo->gen >= 8) {
1610 brw_set_src0(p, insn, brw_imm_d(0x0));
1611 } else {
1612 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1613 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1614 }
1615
1616 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1617 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1618 return insn;
1619 }
1620
1621 /* DO/WHILE loop:
1622 *
1623 * The DO/WHILE is just an unterminated loop -- break or continue are
1624  * used for control within the loop.  There are a few ways this can
1625  * be done.
1626 *
1627 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1628 * jip and no DO instruction.
1629 *
1630 * For non-uniform control flow pre-gen6, there's a DO instruction to
1631 * push the mask, and a WHILE to jump back, and BREAK to get out and
1632 * pop the mask.
1633 *
1634 * For gen6, there's no more mask stack, so no need for DO. WHILE
1635 * just points back to the first instruction of the loop.
1636 */
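/* A rough sketch of the pre-gen6 non-uniform case (illustrative only; the
 * exact jump-count arithmetic lives in brw_patch_break_cont() and
 * brw_WHILE() below):
 *
 *    DO              <- pushes the loop onto p->loop_stack
 *    ...
 *    BREAK           <- patched to jump just past the WHILE
 *    ...
 *    CONTINUE        <- patched to jump to the WHILE itself
 *    ...
 *    WHILE           <- jump count points back to just after the DO
 */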
1637 brw_inst *
1638 brw_DO(struct brw_codegen *p, unsigned execute_size)
1639 {
1640 const struct gen_device_info *devinfo = p->devinfo;
1641
1642 if (devinfo->gen >= 6 || p->single_program_flow) {
1643 push_loop_stack(p, &p->store[p->nr_insn]);
1644 return &p->store[p->nr_insn];
1645 } else {
1646 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1647
1648 push_loop_stack(p, insn);
1649
1650 /* Override the defaults for this instruction:
1651 */
1652 brw_set_dest(p, insn, brw_null_reg());
1653 brw_set_src0(p, insn, brw_null_reg());
1654 brw_set_src1(p, insn, brw_null_reg());
1655
1656 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1657 brw_inst_set_exec_size(devinfo, insn, execute_size);
1658 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1659
1660 return insn;
1661 }
1662 }
1663
1664 /**
1665 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1666 * instruction here.
1667 *
1668 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1669 * nesting, since it can always just point to the end of the block/current loop.
1670 */
1671 static void
1672 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1673 {
1674 const struct gen_device_info *devinfo = p->devinfo;
1675 brw_inst *do_inst = get_inner_do_insn(p);
1676 brw_inst *inst;
1677 unsigned br = brw_jump_scale(devinfo);
1678
1679 assert(devinfo->gen < 6);
1680
1681 for (inst = while_inst - 1; inst != do_inst; inst--) {
1682       /* If the jump count is != 0, this instruction has already been
1683        * patched, because it's part of a loop nested inside the one we're
1684        * patching.
1685        */
1686 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1687 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1688 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1689 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1690 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1691 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1692 }
1693 }
1694 }
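/* Worked example (illustrative): on Gen5, brw_jump_scale() returns 2, so a
 * BREAK three instructions before the WHILE gets a jump count of
 * 2 * (3 + 1) == 8, landing just past the WHILE, while a CONTINUE at the
 * same spot gets 2 * 3 == 6, landing on the WHILE itself.
 */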
1695
1696 brw_inst *
1697 brw_WHILE(struct brw_codegen *p)
1698 {
1699 const struct gen_device_info *devinfo = p->devinfo;
1700 brw_inst *insn, *do_insn;
1701 unsigned br = brw_jump_scale(devinfo);
1702
1703 if (devinfo->gen >= 6) {
1704 insn = next_insn(p, BRW_OPCODE_WHILE);
1705 do_insn = get_inner_do_insn(p);
1706
1707 if (devinfo->gen >= 8) {
1708 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1709 brw_set_src0(p, insn, brw_imm_d(0));
1710 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1711 } else if (devinfo->gen == 7) {
1712 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1713 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1714 brw_set_src1(p, insn, brw_imm_w(0));
1715 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1716 } else {
1717 brw_set_dest(p, insn, brw_imm_w(0));
1718 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1719 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1720 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1721 }
1722
1723 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1724
1725 } else {
1726 if (p->single_program_flow) {
1727 insn = next_insn(p, BRW_OPCODE_ADD);
1728 do_insn = get_inner_do_insn(p);
1729
1730 brw_set_dest(p, insn, brw_ip_reg());
1731 brw_set_src0(p, insn, brw_ip_reg());
1732 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1733 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1734 } else {
1735 insn = next_insn(p, BRW_OPCODE_WHILE);
1736 do_insn = get_inner_do_insn(p);
1737
1738 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1739
1740 brw_set_dest(p, insn, brw_ip_reg());
1741 brw_set_src0(p, insn, brw_ip_reg());
1742 brw_set_src1(p, insn, brw_imm_d(0));
1743
1744 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1745 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1746 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1747
1748 brw_patch_break_cont(p, insn);
1749 }
1750 }
1751 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1752
1753 p->loop_stack_depth--;
1754
1755 return insn;
1756 }
1757
1758 /* FORWARD JUMPS:
1759 */
1760 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1761 {
1762 const struct gen_device_info *devinfo = p->devinfo;
1763 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1764 unsigned jmpi = 1;
1765
1766 if (devinfo->gen >= 5)
1767 jmpi = 2;
1768
1769 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1770 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1771
1772 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1773 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1774 }
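/* Worked example (illustrative): on Gen5+ (jmpi == 2), a JMPI at index 10
 * in a program that currently holds 15 instructions gets a jump count of
 * 2 * (15 - 10 - 1) == 8, skipping the four instructions that follow it.
 */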
1775
1776 /* To integrate with the above, it makes sense that the comparison
1777 * instruction should populate the flag register. It might be simpler
1778 * just to use the flag reg for most WM tasks?
1779 */
1780 void brw_CMP(struct brw_codegen *p,
1781 struct brw_reg dest,
1782 unsigned conditional,
1783 struct brw_reg src0,
1784 struct brw_reg src1)
1785 {
1786 const struct gen_device_info *devinfo = p->devinfo;
1787 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1788
1789 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1790 brw_set_dest(p, insn, dest);
1791 brw_set_src0(p, insn, src0);
1792 brw_set_src1(p, insn, src1);
1793
1794 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1795 * page says:
1796 * "Any CMP instruction with a null destination must use a {switch}."
1797 *
1798 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1799 * mentioned on their work-arounds pages.
1800 */
1801 if (devinfo->gen == 7) {
1802 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1803 dest.nr == BRW_ARF_NULL) {
1804 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1805 }
1806 }
1807 }
1808
1809 /***********************************************************************
1810 * Helpers for the various SEND message types:
1811 */
1812
1813 /** Extended math function, float[8].
1814 */
1815 void gen4_math(struct brw_codegen *p,
1816 struct brw_reg dest,
1817 unsigned function,
1818 unsigned msg_reg_nr,
1819 struct brw_reg src,
1820 unsigned precision )
1821 {
1822 const struct gen_device_info *devinfo = p->devinfo;
1823 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1824 unsigned data_type;
1825 if (has_scalar_region(src)) {
1826 data_type = BRW_MATH_DATA_SCALAR;
1827 } else {
1828 data_type = BRW_MATH_DATA_VECTOR;
1829 }
1830
1831 assert(devinfo->gen < 6);
1832
1833 /* Example code doesn't set predicate_control for send
1834 * instructions.
1835 */
1836 brw_inst_set_pred_control(devinfo, insn, 0);
1837 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1838
1839 brw_set_dest(p, insn, dest);
1840 brw_set_src0(p, insn, src);
1841 brw_set_math_message(p,
1842 insn,
1843 function,
1844 src.type == BRW_REGISTER_TYPE_D,
1845 precision,
1846 data_type);
1847 }
1848
1849 void gen6_math(struct brw_codegen *p,
1850 struct brw_reg dest,
1851 unsigned function,
1852 struct brw_reg src0,
1853 struct brw_reg src1)
1854 {
1855 const struct gen_device_info *devinfo = p->devinfo;
1856 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1857
1858 assert(devinfo->gen >= 6);
1859
1860 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1861 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1862
1863 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1864 if (devinfo->gen == 6) {
1865 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1866 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1867 }
1868
1869 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1870 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1871 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1872 assert(src0.type != BRW_REGISTER_TYPE_F);
1873 assert(src1.type != BRW_REGISTER_TYPE_F);
1874 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1875 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1876 } else {
1877 assert(src0.type == BRW_REGISTER_TYPE_F);
1878 assert(src1.type == BRW_REGISTER_TYPE_F);
1879 }
1880
1881 /* Source modifiers are ignored for extended math instructions on Gen6. */
1882 if (devinfo->gen == 6) {
1883 assert(!src0.negate);
1884 assert(!src0.abs);
1885 assert(!src1.negate);
1886 assert(!src1.abs);
1887 }
1888
1889 brw_inst_set_math_function(devinfo, insn, function);
1890
1891 brw_set_dest(p, insn, dest);
1892 brw_set_src0(p, insn, src0);
1893 brw_set_src1(p, insn, src1);
1894 }
1895
1896 /**
1897 * Return the right surface index to access the thread scratch space using
1898 * stateless dataport messages.
1899 */
1900 unsigned
1901 brw_scratch_surface_idx(const struct brw_codegen *p)
1902 {
1903 /* The scratch space is thread-local so IA coherency is unnecessary. */
1904 if (p->devinfo->gen >= 8)
1905 return GEN8_BTI_STATELESS_NON_COHERENT;
1906 else
1907 return BRW_BTI_STATELESS;
1908 }
1909
1910 /**
1911  * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1912  * using a single constant offset.
1913 *
1914 * The offset must be aligned to oword size (16 bytes). Used for
1915 * register spilling.
1916 */
1917 void brw_oword_block_write_scratch(struct brw_codegen *p,
1918 struct brw_reg mrf,
1919 int num_regs,
1920 unsigned offset)
1921 {
1922 const struct gen_device_info *devinfo = p->devinfo;
1923 const unsigned target_cache =
1924 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1925 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1926 BRW_SFID_DATAPORT_WRITE);
1927 uint32_t msg_type;
1928
1929 if (devinfo->gen >= 6)
1930 offset /= 16;
1931
1932 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1933
1934 const unsigned mlen = 1 + num_regs;
1935
1936 /* Set up the message header. This is g0, with g0.2 filled with
1937 * the offset. We don't want to leave our offset around in g0 or
1938 * it'll screw up texture samples, so set it up inside the message
1939 * reg.
1940 */
1941 {
1942 brw_push_insn_state(p);
1943 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1944 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1945 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1946
1947 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1948
1949 /* set message header global offset field (reg 0, element 2) */
1950 brw_set_default_exec_size(p, BRW_EXECUTE_1);
1951 brw_MOV(p,
1952 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
1953 mrf.nr,
1954 2), BRW_REGISTER_TYPE_UD),
1955 brw_imm_ud(offset));
1956
1957 brw_pop_insn_state(p);
1958 }
1959
1960 {
1961 struct brw_reg dest;
1962 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1963 int send_commit_msg;
1964 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
1965 BRW_REGISTER_TYPE_UW);
1966
1967 brw_inst_set_sfid(devinfo, insn, target_cache);
1968 brw_inst_set_compression(devinfo, insn, false);
1969
1970 if (brw_inst_exec_size(devinfo, insn) >= 16)
1971 src_header = vec16(src_header);
1972
1973 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1974 if (devinfo->gen < 6)
1975 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
1976
1977 /* Until gen6, writes followed by reads from the same location
1978 * are not guaranteed to be ordered unless write_commit is set.
1979 * If set, then a no-op write is issued to the destination
1980 * register to set a dependency, and a read from the destination
1981 * can be used to ensure the ordering.
1982 *
1983 * For gen6, only writes between different threads need ordering
1984 * protection. Our use of DP writes is all about register
1985 * spilling within a thread.
1986 */
1987 if (devinfo->gen >= 6) {
1988 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
1989 send_commit_msg = 0;
1990 } else {
1991 dest = src_header;
1992 send_commit_msg = 1;
1993 }
1994
1995 brw_set_dest(p, insn, dest);
1996 if (devinfo->gen >= 6) {
1997 brw_set_src0(p, insn, mrf);
1998 } else {
1999 brw_set_src0(p, insn, brw_null_reg());
2000 }
2001
2002 if (devinfo->gen >= 6)
2003 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2004 else
2005 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2006
2007 brw_set_desc(p, insn,
2008 brw_message_desc(devinfo, mlen, send_commit_msg, true) |
2009 brw_dp_write_desc(devinfo, brw_scratch_surface_idx(p),
2010 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2011 msg_type, 0, /* not a render target */
2012 send_commit_msg));
2013 }
2014 }
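/* Usage sketch (illustrative only, not a call made in this file): spilling
 * two GRFs whose payload has already been copied into m2..m3, with m1
 * holding the header built above, might look like
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(1), 2, 0);
 *
 * where the byte offset (0 here) must be a multiple of the OWord size.
 */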
2015
2016
2017 /**
2018  * Read a block of OWords (half a GRF each) from the scratch buffer
2019  * using a single constant offset.
2020 *
2021 * Offset must be aligned to oword size (16 bytes). Used for register
2022 * spilling.
2023 */
2024 void
2025 brw_oword_block_read_scratch(struct brw_codegen *p,
2026 struct brw_reg dest,
2027 struct brw_reg mrf,
2028 int num_regs,
2029 unsigned offset)
2030 {
2031 const struct gen_device_info *devinfo = p->devinfo;
2032
2033 if (devinfo->gen >= 6)
2034 offset /= 16;
2035
2036 if (p->devinfo->gen >= 7) {
2037 /* On gen 7 and above, we no longer have message registers and we can
2038 * send from any register we want. By using the destination register
2039 * for the message, we guarantee that the implied message write won't
2040 * accidentally overwrite anything. This has been a problem because
2041 * the MRF registers and source for the final FB write are both fixed
2042 * and may overlap.
2043 */
2044 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2045 } else {
2046 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2047 }
2048 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2049
2050 const unsigned rlen = num_regs;
2051 const unsigned target_cache =
2052 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2053 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2054 BRW_SFID_DATAPORT_READ);
2055
2056 {
2057 brw_push_insn_state(p);
2058 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2059 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2060 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2061
2062 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2063
2064 /* set message header global offset field (reg 0, element 2) */
2065 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2066 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2067
2068 brw_pop_insn_state(p);
2069 }
2070
2071 {
2072 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2073
2074 brw_inst_set_sfid(devinfo, insn, target_cache);
2075 assert(brw_inst_pred_control(devinfo, insn) == 0);
2076 brw_inst_set_compression(devinfo, insn, false);
2077
2078 brw_set_dest(p, insn, dest); /* UW? */
2079 if (devinfo->gen >= 6) {
2080 brw_set_src0(p, insn, mrf);
2081 } else {
2082 brw_set_src0(p, insn, brw_null_reg());
2083 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2084 }
2085
2086 brw_set_desc(p, insn,
2087 brw_message_desc(devinfo, 1, rlen, true) |
2088 brw_dp_read_desc(devinfo, brw_scratch_surface_idx(p),
2089 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2090 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2091 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2092 }
2093 }
2094
2095 void
2096 gen7_block_read_scratch(struct brw_codegen *p,
2097 struct brw_reg dest,
2098 int num_regs,
2099 unsigned offset)
2100 {
2101 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2102 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2103
2104 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2105
2106 /* The HW requires that the header is present; this is to get the g0.5
2107 * scratch offset.
2108 */
2109 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2110
2111 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2112 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2113 * is 32 bytes, which happens to be the size of a register.
2114 */
2115 offset /= REG_SIZE;
2116 assert(offset < (1 << 12));
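   /* For example, a byte offset of 4096 becomes HWord offset 4096 / 32 == 128,
    * comfortably within the 12-bit limit.
    */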
2117
2118 gen7_set_dp_scratch_message(p, insn,
2119 false, /* scratch read */
2120 false, /* OWords */
2121 false, /* invalidate after read */
2122 num_regs,
2123 offset,
2124 1, /* mlen: just g0 */
2125 num_regs, /* rlen */
2126 true); /* header present */
2127 }
2128
2129 /**
2130 * Read float[4] vectors from the data port constant cache.
2131 * Location (in buffer) should be a multiple of 16.
2132 * Used for fetching shader constants.
2133 */
2134 void brw_oword_block_read(struct brw_codegen *p,
2135 struct brw_reg dest,
2136 struct brw_reg mrf,
2137 uint32_t offset,
2138 uint32_t bind_table_index)
2139 {
2140 const struct gen_device_info *devinfo = p->devinfo;
2141 const unsigned target_cache =
2142 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2143 BRW_SFID_DATAPORT_READ);
2144 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2145
2146 /* On newer hardware, offset is in units of owords. */
2147 if (devinfo->gen >= 6)
2148 offset /= 16;
2149
2150 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2151
2152 brw_push_insn_state(p);
2153 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2154 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2155 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2156
2157 brw_push_insn_state(p);
2158 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2159 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2160
2161 /* set message header global offset field (reg 0, element 2) */
2162 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2163 brw_MOV(p,
2164 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2165 mrf.nr,
2166 2), BRW_REGISTER_TYPE_UD),
2167 brw_imm_ud(offset));
2168 brw_pop_insn_state(p);
2169
2170 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2171
2172 brw_inst_set_sfid(devinfo, insn, target_cache);
2173
2174 /* cast dest to a uword[8] vector */
2175 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2176
2177 brw_set_dest(p, insn, dest);
2178 if (devinfo->gen >= 6) {
2179 brw_set_src0(p, insn, mrf);
2180 } else {
2181 brw_set_src0(p, insn, brw_null_reg());
2182 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2183 }
2184
2185 brw_set_desc(p, insn,
2186 brw_message_desc(devinfo, 1, DIV_ROUND_UP(exec_size, 8), true) |
2187 brw_dp_read_desc(devinfo, bind_table_index,
2188 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2189 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2190 BRW_DATAPORT_READ_TARGET_DATA_CACHE));
2191
2192 brw_pop_insn_state(p);
2193 }
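/* Usage sketch (illustrative only; the binding table index and offset are
 * made up): fetching a vec4 constant from binding table entry 1 at byte
 * offset 32 could look like
 *
 *    brw_oword_block_read(p, dest, brw_message_reg(1), 32, 1);
 */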
2194
2195 brw_inst *
2196 brw_fb_WRITE(struct brw_codegen *p,
2197 struct brw_reg payload,
2198 struct brw_reg implied_header,
2199 unsigned msg_control,
2200 unsigned binding_table_index,
2201 unsigned msg_length,
2202 unsigned response_length,
2203 bool eot,
2204 bool last_render_target,
2205 bool header_present)
2206 {
2207 const struct gen_device_info *devinfo = p->devinfo;
2208 const unsigned target_cache =
2209 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2210 BRW_SFID_DATAPORT_WRITE);
2211 brw_inst *insn;
2212 unsigned msg_type;
2213 struct brw_reg dest, src0;
2214
2215 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2216 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2217 else
2218 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2219
2220 if (devinfo->gen >= 6) {
2221 insn = next_insn(p, BRW_OPCODE_SENDC);
2222 } else {
2223 insn = next_insn(p, BRW_OPCODE_SEND);
2224 }
2225 brw_inst_set_sfid(devinfo, insn, target_cache);
2226 brw_inst_set_compression(devinfo, insn, false);
2227
2228 if (devinfo->gen >= 6) {
2229 /* headerless version, just submit color payload */
2230 src0 = payload;
2231
2232 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2233 } else {
2234 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2235 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2236 src0 = implied_header;
2237
2238 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2239 }
2240
2241 brw_set_dest(p, insn, dest);
2242 brw_set_src0(p, insn, src0);
2243 brw_set_desc(p, insn,
2244 brw_message_desc(devinfo, msg_length, response_length,
2245 header_present) |
2246 brw_dp_write_desc(devinfo, binding_table_index, msg_control,
2247 msg_type, last_render_target,
2248 0 /* send_commit_msg */));
2249 brw_inst_set_eot(devinfo, insn, eot);
2250
2251 return insn;
2252 }
2253
2254 brw_inst *
2255 gen9_fb_READ(struct brw_codegen *p,
2256 struct brw_reg dst,
2257 struct brw_reg payload,
2258 unsigned binding_table_index,
2259 unsigned msg_length,
2260 unsigned response_length,
2261 bool per_sample)
2262 {
2263 const struct gen_device_info *devinfo = p->devinfo;
2264 assert(devinfo->gen >= 9);
2265 const unsigned msg_subtype =
2266 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2267 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2268
2269 brw_inst_set_sfid(devinfo, insn, GEN6_SFID_DATAPORT_RENDER_CACHE);
2270 brw_set_dest(p, insn, dst);
2271 brw_set_src0(p, insn, payload);
2272 brw_set_desc(
2273 p, insn,
2274 brw_message_desc(devinfo, msg_length, response_length, true) |
2275 brw_dp_read_desc(devinfo, binding_table_index,
2276 per_sample << 5 | msg_subtype,
2277 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2278 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2279 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2280
2281 return insn;
2282 }
2283
2284 /**
2285 * Texture sample instruction.
2286 * Note: the msg_type plus msg_length values determine exactly what kind
2287 * of sampling operation is performed. See volume 4, page 161 of docs.
2288 */
2289 void brw_SAMPLE(struct brw_codegen *p,
2290 struct brw_reg dest,
2291 unsigned msg_reg_nr,
2292 struct brw_reg src0,
2293 unsigned binding_table_index,
2294 unsigned sampler,
2295 unsigned msg_type,
2296 unsigned response_length,
2297 unsigned msg_length,
2298 unsigned header_present,
2299 unsigned simd_mode,
2300 unsigned return_format)
2301 {
2302 const struct gen_device_info *devinfo = p->devinfo;
2303 brw_inst *insn;
2304
2305 if (msg_reg_nr != -1)
2306 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2307
2308 insn = next_insn(p, BRW_OPCODE_SEND);
2309 brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
2310 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2311
2312 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2313 *
2314 * "Instruction compression is not allowed for this instruction (that
2315 * is, send). The hardware behavior is undefined if this instruction is
2316 * set as compressed. However, compress control can be set to "SecHalf"
2317 * to affect the EMask generation."
2318 *
2319 * No similar wording is found in later PRMs, but there are examples
2320 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2321 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2322 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2323 */
2324 brw_inst_set_compression(devinfo, insn, false);
2325
2326 if (devinfo->gen < 6)
2327 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2328
2329 brw_set_dest(p, insn, dest);
2330 brw_set_src0(p, insn, src0);
2331 brw_set_desc(p, insn,
2332 brw_message_desc(devinfo, msg_length, response_length,
2333 header_present) |
2334 brw_sampler_desc(devinfo, binding_table_index, sampler,
2335 msg_type, simd_mode, return_format));
2336 }
2337
2338 /* Adjust the message header's sampler state pointer to
2339 * select the correct group of 16 samplers.
2340 */
2341 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2342 struct brw_reg header,
2343 struct brw_reg sampler_index)
2344 {
2345 /* The "Sampler Index" field can only store values between 0 and 15.
2346 * However, we can add an offset to the "Sampler State Pointer"
2347 * field, effectively selecting a different set of 16 samplers.
2348 *
2349 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2350 * offset, and each sampler state is only 16-bytes, so we can't
2351 * exclusively use the offset - we have to use both.
2352 */
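/* Worked example (illustrative): an immediate sampler index of 17 adds
 * 16 * (17 / 16) * 16 == 256 bytes to the "Sampler State Pointer",
 * selecting samplers 16..31.  The non-immediate path below computes the
 * same value as (index & 0xf0) << 4.
 */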
2353
2354 const struct gen_device_info *devinfo = p->devinfo;
2355
2356 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2357 const int sampler_state_size = 16; /* 16 bytes */
2358 uint32_t sampler = sampler_index.ud;
2359
2360 if (sampler >= 16) {
2361 assert(devinfo->is_haswell || devinfo->gen >= 8);
2362 brw_ADD(p,
2363 get_element_ud(header, 3),
2364 get_element_ud(brw_vec8_grf(0, 0), 3),
2365 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2366 }
2367 } else {
2368 /* Non-const sampler array indexing case */
2369 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2370 return;
2371 }
2372
2373 struct brw_reg temp = get_element_ud(header, 3);
2374
2375 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2376 brw_SHL(p, temp, temp, brw_imm_ud(4));
2377 brw_ADD(p,
2378 get_element_ud(header, 3),
2379 get_element_ud(brw_vec8_grf(0, 0), 3),
2380 temp);
2381 }
2382 }
2383
2384 /* All these variables are pretty confusing - we might be better off
2385 * using bitmasks and macros for this, in the old style. Or perhaps
2386 * just having the caller instantiate the fields in dword3 itself.
2387 */
2388 void brw_urb_WRITE(struct brw_codegen *p,
2389 struct brw_reg dest,
2390 unsigned msg_reg_nr,
2391 struct brw_reg src0,
2392 enum brw_urb_write_flags flags,
2393 unsigned msg_length,
2394 unsigned response_length,
2395 unsigned offset,
2396 unsigned swizzle)
2397 {
2398 const struct gen_device_info *devinfo = p->devinfo;
2399 brw_inst *insn;
2400
2401 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2402
2403 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2404 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2405 brw_push_insn_state(p);
2406 brw_set_default_access_mode(p, BRW_ALIGN_1);
2407 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2408 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2409 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2410 BRW_REGISTER_TYPE_UD),
2411 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2412 brw_imm_ud(0xff00));
2413 brw_pop_insn_state(p);
2414 }
2415
2416 insn = next_insn(p, BRW_OPCODE_SEND);
2417
2418 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2419
2420 brw_set_dest(p, insn, dest);
2421 brw_set_src0(p, insn, src0);
2422 brw_set_src1(p, insn, brw_imm_d(0));
2423
2424 if (devinfo->gen < 6)
2425 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2426
2427 brw_set_urb_message(p,
2428 insn,
2429 flags,
2430 msg_length,
2431 response_length,
2432 offset,
2433 swizzle);
2434 }
2435
2436 void
2437 brw_send_indirect_message(struct brw_codegen *p,
2438 unsigned sfid,
2439 struct brw_reg dst,
2440 struct brw_reg payload,
2441 struct brw_reg desc,
2442 unsigned desc_imm)
2443 {
2444 const struct gen_device_info *devinfo = p->devinfo;
2445 struct brw_inst *send;
2446
2447 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2448
2449 assert(desc.type == BRW_REGISTER_TYPE_UD);
2450
2451 if (desc.file == BRW_IMMEDIATE_VALUE) {
2452 send = next_insn(p, BRW_OPCODE_SEND);
2453 brw_set_desc(p, send, desc.ud | desc_imm);
2454
2455 } else {
2456 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2457
2458 brw_push_insn_state(p);
2459 brw_set_default_access_mode(p, BRW_ALIGN_1);
2460 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2461 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2462 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2463
2464       /* Load the indirect descriptor into an address register using OR, so the
2465 * caller can specify additional descriptor bits with the desc_imm
2466 * immediate.
2467 */
2468 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2469
2470 brw_pop_insn_state(p);
2471
2472 send = next_insn(p, BRW_OPCODE_SEND);
2473 brw_set_src1(p, send, addr);
2474 }
2475
2476 if (dst.width < BRW_EXECUTE_8)
2477 brw_inst_set_exec_size(devinfo, send, dst.width);
2478
2479 brw_set_dest(p, send, dst);
2480 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2481 brw_inst_set_sfid(devinfo, send, sfid);
2482 }
2483
2484 static void
2485 brw_send_indirect_surface_message(struct brw_codegen *p,
2486 unsigned sfid,
2487 struct brw_reg dst,
2488 struct brw_reg payload,
2489 struct brw_reg surface,
2490 unsigned desc_imm)
2491 {
2492 if (surface.file != BRW_IMMEDIATE_VALUE) {
2493 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2494
2495 brw_push_insn_state(p);
2496 brw_set_default_access_mode(p, BRW_ALIGN_1);
2497 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2498 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2499 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2500
2501       /* Mask out invalid bits from the surface index to avoid hangs, e.g. when
2502 * some surface array is accessed out of bounds.
2503 */
2504 brw_AND(p, addr,
2505 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2506 BRW_GET_SWZ(surface.swizzle, 0)),
2507 brw_imm_ud(0xff));
2508
2509 brw_pop_insn_state(p);
2510
2511 surface = addr;
2512 }
2513
2514 brw_send_indirect_message(p, sfid, dst, payload, surface, desc_imm);
2515 }
2516
2517 static bool
2518 while_jumps_before_offset(const struct gen_device_info *devinfo,
2519 brw_inst *insn, int while_offset, int start_offset)
2520 {
2521 int scale = 16 / brw_jump_scale(devinfo);
2522 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2523 : brw_inst_jip(devinfo, insn);
2524 assert(jip < 0);
2525 return while_offset + jip * scale <= start_offset;
2526 }
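/* The scale factor converts jump units back to bytes: a jump unit is 16
 * bytes on Gen4 (br == 1), 8 bytes on Gen5-7 (br == 2), and 1 byte on
 * Gen8+ (br == 16).  For example (illustrative), a Gen7 WHILE at byte
 * offset 160 with jip == -10 targets offset 160 + (-10 * 8) == 80.
 */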
2527
2528
2529 static int
2530 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2531 {
2532 int offset;
2533 void *store = p->store;
2534 const struct gen_device_info *devinfo = p->devinfo;
2535
2536 int depth = 0;
2537
2538 for (offset = next_offset(devinfo, store, start_offset);
2539 offset < p->next_insn_offset;
2540 offset = next_offset(devinfo, store, offset)) {
2541 brw_inst *insn = store + offset;
2542
2543 switch (brw_inst_opcode(devinfo, insn)) {
2544 case BRW_OPCODE_IF:
2545 depth++;
2546 break;
2547 case BRW_OPCODE_ENDIF:
2548 if (depth == 0)
2549 return offset;
2550 depth--;
2551 break;
2552 case BRW_OPCODE_WHILE:
2553 /* If the while doesn't jump before our instruction, it's the end
2554 * of a sibling do...while loop. Ignore it.
2555 */
2556 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2557 continue;
2558 /* fallthrough */
2559 case BRW_OPCODE_ELSE:
2560 case BRW_OPCODE_HALT:
2561 if (depth == 0)
2562 return offset;
2563 }
2564 }
2565
2566 return 0;
2567 }
2568
2569 /* There is no DO instruction on gen6, so to find the end of the loop
2570 * we have to see if the loop is jumping back before our start
2571 * instruction.
2572 */
2573 static int
2574 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2575 {
2576 const struct gen_device_info *devinfo = p->devinfo;
2577 int offset;
2578 void *store = p->store;
2579
2580 assert(devinfo->gen >= 6);
2581
2582 /* Always start after the instruction (such as a WHILE) we're trying to fix
2583 * up.
2584 */
2585 for (offset = next_offset(devinfo, store, start_offset);
2586 offset < p->next_insn_offset;
2587 offset = next_offset(devinfo, store, offset)) {
2588 brw_inst *insn = store + offset;
2589
2590 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2591 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2592 return offset;
2593 }
2594 }
2595 assert(!"not reached");
2596 return start_offset;
2597 }
2598
2599 /* After program generation, go back and update the UIP and JIP of
2600  * BREAK, CONT, ENDIF, and HALT instructions to their correct locations.
2601 */
2602 void
2603 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2604 {
2605 const struct gen_device_info *devinfo = p->devinfo;
2606 int offset;
2607 int br = brw_jump_scale(devinfo);
2608 int scale = 16 / br;
2609 void *store = p->store;
2610
2611 if (devinfo->gen < 6)
2612 return;
2613
2614 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2615 brw_inst *insn = store + offset;
2616 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2617
2618 int block_end_offset = brw_find_next_block_end(p, offset);
2619 switch (brw_inst_opcode(devinfo, insn)) {
2620 case BRW_OPCODE_BREAK:
2621 assert(block_end_offset != 0);
2622 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2623 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2624 brw_inst_set_uip(devinfo, insn,
2625 (brw_find_loop_end(p, offset) - offset +
2626 (devinfo->gen == 6 ? 16 : 0)) / scale);
2627 break;
2628 case BRW_OPCODE_CONTINUE:
2629 assert(block_end_offset != 0);
2630 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2631 brw_inst_set_uip(devinfo, insn,
2632 (brw_find_loop_end(p, offset) - offset) / scale);
2633
2634 assert(brw_inst_uip(devinfo, insn) != 0);
2635 assert(brw_inst_jip(devinfo, insn) != 0);
2636 break;
2637
2638 case BRW_OPCODE_ENDIF: {
2639 int32_t jump = (block_end_offset == 0) ?
2640 1 * br : (block_end_offset - offset) / scale;
2641 if (devinfo->gen >= 7)
2642 brw_inst_set_jip(devinfo, insn, jump);
2643 else
2644 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2645 break;
2646 }
2647
2648 case BRW_OPCODE_HALT:
2649 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2650 *
2651 * "In case of the halt instruction not inside any conditional
2652 * code block, the value of <JIP> and <UIP> should be the
2653 * same. In case of the halt instruction inside conditional code
2654 * block, the <UIP> should be the end of the program, and the
2655 * <JIP> should be end of the most inner conditional code block."
2656 *
2657 * The uip will have already been set by whoever set up the
2658 * instruction.
2659 */
2660 if (block_end_offset == 0) {
2661 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2662 } else {
2663 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2664 }
2665 assert(brw_inst_uip(devinfo, insn) != 0);
2666 assert(brw_inst_jip(devinfo, insn) != 0);
2667 break;
2668 }
2669 }
2670 }
2671
2672 void brw_ff_sync(struct brw_codegen *p,
2673 struct brw_reg dest,
2674 unsigned msg_reg_nr,
2675 struct brw_reg src0,
2676 bool allocate,
2677 unsigned response_length,
2678 bool eot)
2679 {
2680 const struct gen_device_info *devinfo = p->devinfo;
2681 brw_inst *insn;
2682
2683 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2684
2685 insn = next_insn(p, BRW_OPCODE_SEND);
2686 brw_set_dest(p, insn, dest);
2687 brw_set_src0(p, insn, src0);
2688 brw_set_src1(p, insn, brw_imm_d(0));
2689
2690 if (devinfo->gen < 6)
2691 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2692
2693 brw_set_ff_sync_message(p,
2694 insn,
2695 allocate,
2696 response_length,
2697 eot);
2698 }
2699
2700 /**
2701 * Emit the SEND instruction necessary to generate stream output data on Gen6
2702 * (for transform feedback).
2703 *
2704 * If send_commit_msg is true, this is the last piece of stream output data
2705 * from this thread, so send the data as a committed write. According to the
2706 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2707 *
2708 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2709 * writes are complete by sending the final write as a committed write."
2710 */
2711 void
2712 brw_svb_write(struct brw_codegen *p,
2713 struct brw_reg dest,
2714 unsigned msg_reg_nr,
2715 struct brw_reg src0,
2716 unsigned binding_table_index,
2717 bool send_commit_msg)
2718 {
2719 const struct gen_device_info *devinfo = p->devinfo;
2720 const unsigned target_cache =
2721 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2722 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2723 BRW_SFID_DATAPORT_WRITE);
2724 brw_inst *insn;
2725
2726 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2727
2728 insn = next_insn(p, BRW_OPCODE_SEND);
2729 brw_inst_set_sfid(devinfo, insn, target_cache);
2730 brw_set_dest(p, insn, dest);
2731 brw_set_src0(p, insn, src0);
2732 brw_set_desc(p, insn,
2733 brw_message_desc(devinfo, 1, send_commit_msg, true) |
2734 brw_dp_write_desc(devinfo, binding_table_index,
2735 0, /* msg_control: ignored */
2736 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2737 0, /* last_render_target: ignored */
2738 send_commit_msg)); /* send_commit_msg */
2739 }
2740
2741 static unsigned
2742 brw_surface_payload_size(struct brw_codegen *p,
2743 unsigned num_channels,
2744 bool has_simd4x2,
2745 bool has_simd16)
2746 {
2747 if (has_simd4x2 && brw_get_default_access_mode(p) == BRW_ALIGN_16)
2748 return 1;
2749 else if (has_simd16 && brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2750 return 2 * num_channels;
2751 else
2752 return num_channels;
2753 }
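/* For example (illustrative): a SIMD16 untyped surface read returning four
 * channels occupies 2 * 4 == 8 response registers, while a SIMD4x2 access
 * in Align16 mode packs its return data into a single register.
 */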
2754
2755 static uint32_t
2756 brw_dp_untyped_atomic_desc(struct brw_codegen *p,
2757 unsigned atomic_op,
2758 bool response_expected)
2759 {
2760 const struct gen_device_info *devinfo = p->devinfo;
2761 unsigned msg_control =
2762 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2763 (response_expected ? 1 << 5 : 0); /* Return data expected */
2764 unsigned msg_type;
2765
2766 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2767 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2768 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2769 msg_control |= 1 << 4; /* SIMD8 mode */
2770
2771 msg_type = HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP;
2772 } else {
2773 msg_type = HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2;
2774 }
2775 } else {
2776 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2777 msg_control |= 1 << 4; /* SIMD8 mode */
2778
2779 msg_type = GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP;
2780 }
2781
2782 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2783 }
2784
2785 void
2786 brw_untyped_atomic(struct brw_codegen *p,
2787 struct brw_reg dst,
2788 struct brw_reg payload,
2789 struct brw_reg surface,
2790 unsigned atomic_op,
2791 unsigned msg_length,
2792 bool response_expected,
2793 bool header_present)
2794 {
2795 const struct gen_device_info *devinfo = p->devinfo;
2796 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2797 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2798 GEN7_SFID_DATAPORT_DATA_CACHE);
2799 const unsigned response_length = brw_surface_payload_size(
2800 p, response_expected, devinfo->gen >= 8 || devinfo->is_haswell, true);
2801 const unsigned desc =
2802 brw_message_desc(devinfo, msg_length, response_length, header_present) |
2803 brw_dp_untyped_atomic_desc(p, atomic_op, response_expected);
2804 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2805 /* Mask out unused components -- This is especially important in Align16
2806 * mode on generations that don't have native support for SIMD4x2 atomics,
2807 * because unused but enabled components will cause the dataport to perform
2808 * additional atomic operations on the addresses that happen to be in the
2809 * uninitialized Y, Z and W coordinates of the payload.
2810 */
2811 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2812
2813 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
2814 payload, surface, desc);
2815 }
2816
2817 static uint32_t
2818 brw_dp_untyped_atomic_float_desc(struct brw_codegen *p,
2819 unsigned atomic_op,
2820 bool response_expected)
2821 {
2822 const struct gen_device_info *devinfo = p->devinfo;
2823 const unsigned msg_type = GEN9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP;
2824 unsigned msg_control =
2825 atomic_op | /* Atomic Operation Type: BRW_AOP_F* */
2826 (response_expected ? 1 << 5 : 0); /* Return data expected */
2827
2828 assert(devinfo->gen >= 9);
2829 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2830
2831 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2832 msg_control |= 1 << 4; /* SIMD8 mode */
2833
2834 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2835 }
2836
2837 void
2838 brw_untyped_atomic_float(struct brw_codegen *p,
2839 struct brw_reg dst,
2840 struct brw_reg payload,
2841 struct brw_reg surface,
2842 unsigned atomic_op,
2843 unsigned msg_length,
2844 bool response_expected,
2845 bool header_present)
2846 {
2847 const struct gen_device_info *devinfo = p->devinfo;
2848
2849 assert(devinfo->gen >= 9);
2850 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2851
2852 const unsigned sfid = HSW_SFID_DATAPORT_DATA_CACHE_1;
2853 const unsigned response_length = brw_surface_payload_size(
2854 p, response_expected, true, true);
2855 const unsigned desc =
2856 brw_message_desc(devinfo, msg_length, response_length, header_present) |
2857 brw_dp_untyped_atomic_float_desc(p, atomic_op, response_expected);
2858
2859 brw_send_indirect_surface_message(p, sfid,
2860 brw_writemask(dst, WRITEMASK_XYZW),
2861 payload, surface, desc);
2862 }
2863
2864 static uint32_t
2865 brw_dp_untyped_surface_read_desc(struct brw_codegen *p,
2866 unsigned num_channels)
2867 {
2868 const struct gen_device_info *devinfo = p->devinfo;
2869 const unsigned msg_type = (devinfo->gen >= 8 || devinfo->is_haswell ?
2870 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2871 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ);
2872 /* Set mask of 32-bit channels to drop. */
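   /* For example, num_channels == 2 gives 0xf & (0xf << 2) == 0xc, dropping
    * the Z and W channels while keeping X and Y.
    */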
2873 unsigned msg_control = 0xf & (0xf << num_channels);
2874
2875 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2876 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2877 msg_control |= 1 << 4; /* SIMD16 mode */
2878 else
2879 msg_control |= 2 << 4; /* SIMD8 mode */
2880 }
2881
2882 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2883 }
2884
2885 void
2886 brw_untyped_surface_read(struct brw_codegen *p,
2887 struct brw_reg dst,
2888 struct brw_reg payload,
2889 struct brw_reg surface,
2890 unsigned msg_length,
2891 unsigned num_channels)
2892 {
2893 const struct gen_device_info *devinfo = p->devinfo;
2894 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2895 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2896 GEN7_SFID_DATAPORT_DATA_CACHE);
2897 const unsigned response_length =
2898 brw_surface_payload_size(p, num_channels, true, true);
2899 const unsigned desc =
2900 brw_message_desc(devinfo, msg_length, response_length, false) |
2901 brw_dp_untyped_surface_read_desc(p, num_channels);
2902
2903 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
2904 }
2905
2906 static uint32_t
2907 brw_dp_untyped_surface_write_desc(struct brw_codegen *p,
2908 unsigned num_channels)
2909 {
2910 const struct gen_device_info *devinfo = p->devinfo;
2911 const unsigned msg_type = (devinfo->gen >= 8 || devinfo->is_haswell ?
2912 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
2913 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
2914 /* Set mask of 32-bit channels to drop. */
2915 unsigned msg_control = 0xf & (0xf << num_channels);
2916
2917 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2918 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2919 msg_control |= 1 << 4; /* SIMD16 mode */
2920 else
2921 msg_control |= 2 << 4; /* SIMD8 mode */
2922 } else {
2923 if (devinfo->gen >= 8 || devinfo->is_haswell)
2924 msg_control |= 0 << 4; /* SIMD4x2 mode */
2925 else
2926 msg_control |= 2 << 4; /* SIMD8 mode */
2927 }
2928
2929 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2930 }
2931
2932 void
2933 brw_untyped_surface_write(struct brw_codegen *p,
2934 struct brw_reg payload,
2935 struct brw_reg surface,
2936 unsigned msg_length,
2937 unsigned num_channels,
2938 bool header_present)
2939 {
2940 const struct gen_device_info *devinfo = p->devinfo;
2941 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2942 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2943 GEN7_SFID_DATAPORT_DATA_CACHE);
2944 const unsigned desc =
2945 brw_message_desc(devinfo, msg_length, 0, header_present) |
2946 brw_dp_untyped_surface_write_desc(p, num_channels);
2947 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2948 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2949 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
2950 WRITEMASK_X : WRITEMASK_XYZW;
2951
2952 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
2953 payload, surface, desc);
2954 }
2955
2956 static unsigned
2957 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
2958 {
2959 switch (bit_size) {
2960 case 8:
2961 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
2962 case 16:
2963 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
2964 case 32:
2965 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
2966 default:
2967 unreachable("Unsupported bit_size for byte scattered messages");
2968 }
2969 }
2970
2971 static uint32_t
2972 brw_dp_byte_scattered_desc(struct brw_codegen *p, unsigned bit_size,
2973 unsigned msg_type)
2974 {
2975 const struct gen_device_info *devinfo = p->devinfo;
2976 unsigned msg_control =
2977 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
2978
2979 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2980 msg_control |= 1; /* SIMD16 mode */
2981 else
2982 msg_control |= 0; /* SIMD8 mode */
2983
2984 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2985 }
2986
2987 void
2988 brw_byte_scattered_read(struct brw_codegen *p,
2989 struct brw_reg dst,
2990 struct brw_reg payload,
2991 struct brw_reg surface,
2992 unsigned msg_length,
2993 unsigned bit_size)
2994 {
2995 const struct gen_device_info *devinfo = p->devinfo;
2996 assert(devinfo->gen > 7 || devinfo->is_haswell);
2997 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2998 const unsigned response_length =
2999 brw_surface_payload_size(p, 1, true, true);
3000 const unsigned desc =
3001 brw_message_desc(devinfo, msg_length, response_length, false) |
3002 brw_dp_byte_scattered_desc(p, bit_size,
3003 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
3004
3005 brw_send_indirect_surface_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
3006 dst, payload, surface, desc);
3007 }
3008
3009 void
3010 brw_byte_scattered_write(struct brw_codegen *p,
3011 struct brw_reg payload,
3012 struct brw_reg surface,
3013 unsigned msg_length,
3014 unsigned bit_size,
3015 bool header_present)
3016 {
3017 const struct gen_device_info *devinfo = p->devinfo;
3018 assert(devinfo->gen > 7 || devinfo->is_haswell);
3019 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3020 const unsigned desc =
3021 brw_message_desc(devinfo, msg_length, 0, header_present) |
3022 brw_dp_byte_scattered_desc(p, bit_size,
3023 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
3024
3025 brw_send_indirect_surface_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
3026 brw_writemask(brw_null_reg(),
3027 WRITEMASK_XYZW),
3028 payload, surface, desc);
3029 }
3030
3031 static uint32_t
3032 brw_dp_typed_atomic_desc(struct brw_codegen *p,
3033 unsigned atomic_op,
3034 bool response_expected)
3035 {
3036 const struct gen_device_info *devinfo = p->devinfo;
3037 unsigned msg_control =
3038 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3039 (response_expected ? 1 << 5 : 0); /* Return data expected */
3040 unsigned msg_type;
3041
3042 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3043 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3044 if ((brw_get_default_group(p) / 8) % 2 == 1)
3045 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3046
3047 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP;
3048 } else {
3049 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2;
3050 }
3051
3052 } else {
3053 if ((brw_get_default_group(p) / 8) % 2 == 1)
3054 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3055
3056 msg_type = GEN7_DATAPORT_RC_TYPED_ATOMIC_OP;
3057 }
3058
3059 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3060 }
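/* Note on the slot-group selection above (illustrative): the second SIMD8
 * half of a SIMD16 shader has a default group of 8, so (8 / 8) % 2 == 1
 * and the message uses the high 8 slots of the sample mask.
 */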
3061
3062 void
3063 brw_typed_atomic(struct brw_codegen *p,
3064 struct brw_reg dst,
3065 struct brw_reg payload,
3066 struct brw_reg surface,
3067 unsigned atomic_op,
3068 unsigned msg_length,
3069 bool response_expected,
3070 bool header_present) {
3071 const struct gen_device_info *devinfo = p->devinfo;
3072 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3073 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3074 GEN6_SFID_DATAPORT_RENDER_CACHE);
3075 const unsigned response_length = brw_surface_payload_size(
3076 p, response_expected, devinfo->gen >= 8 || devinfo->is_haswell, false);
3077 const unsigned desc =
3078 brw_message_desc(devinfo, msg_length, response_length, header_present) |
3079 brw_dp_typed_atomic_desc(p, atomic_op, response_expected);
3080 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3081 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3082 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3083
3084 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
3085 payload, surface, desc);
3086 }
3087
3088 static uint32_t
3089 brw_dp_typed_surface_read_desc(struct brw_codegen *p,
3090 unsigned num_channels)
3091 {
3092 const struct gen_device_info *devinfo = p->devinfo;
3093 /* Set mask of unused channels. */
3094 unsigned msg_control = 0xf & (0xf << num_channels);
3095 unsigned msg_type;
3096
3097 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3098 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3099 if ((brw_get_default_group(p) / 8) % 2 == 1)
3100 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3101 else
3102 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3103 }
3104
3105 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ;
3106 } else {
3107 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3108 if ((brw_get_default_group(p) / 8) % 2 == 1)
3109 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3110 }
3111
3112 msg_type = GEN7_DATAPORT_RC_TYPED_SURFACE_READ;
3113 }
3114
3115 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3116 }
3117
3118 void
3119 brw_typed_surface_read(struct brw_codegen *p,
3120 struct brw_reg dst,
3121 struct brw_reg payload,
3122 struct brw_reg surface,
3123 unsigned msg_length,
3124 unsigned num_channels,
3125 bool header_present)
3126 {
3127 const struct gen_device_info *devinfo = p->devinfo;
3128 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3129 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3130 GEN6_SFID_DATAPORT_RENDER_CACHE);
3131 const unsigned response_length = brw_surface_payload_size(
3132 p, num_channels, devinfo->gen >= 8 || devinfo->is_haswell, false);
3133 const unsigned desc =
3134 brw_message_desc(devinfo, msg_length, response_length, header_present) |
3135 brw_dp_typed_surface_read_desc(p, num_channels);
3136
3137 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
3138 }
3139
3140 static uint32_t
3141 brw_dp_typed_surface_write_desc(struct brw_codegen *p,
3142 unsigned num_channels)
3143 {
3144 const struct gen_device_info *devinfo = p->devinfo;
3145 /* Set mask of unused channels. */
3146 unsigned msg_control = 0xf & (0xf << num_channels);
3147 unsigned msg_type;
3148
3149 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3150 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3151 if ((brw_get_default_group(p) / 8) % 2 == 1)
3152 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3153 else
3154 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3155 }
3156
3157 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE;
3158
3159 } else {
3160 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3161 if ((brw_get_default_group(p) / 8) % 2 == 1)
3162 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3163 }
3164
3165 msg_type = GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE;
3166 }
3167
3168 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3169 }
3170
3171 void
3172 brw_typed_surface_write(struct brw_codegen *p,
3173 struct brw_reg payload,
3174 struct brw_reg surface,
3175 unsigned msg_length,
3176 unsigned num_channels,
3177 bool header_present)
3178 {
3179 const struct gen_device_info *devinfo = p->devinfo;
3180 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3181 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3182 GEN6_SFID_DATAPORT_RENDER_CACHE);
3183 const unsigned desc =
3184 brw_message_desc(devinfo, msg_length, 0, header_present) |
3185 brw_dp_typed_surface_write_desc(p, num_channels);
3186 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3187 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3188 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3189 WRITEMASK_X : WRITEMASK_XYZW);
3190
3191 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
3192 payload, surface, desc);
3193 }
3194
3195 static void
3196 brw_set_memory_fence_message(struct brw_codegen *p,
3197 struct brw_inst *insn,
3198 enum brw_message_target sfid,
3199 bool commit_enable)
3200 {
3201 const struct gen_device_info *devinfo = p->devinfo;
3202
3203 brw_set_desc(p, insn, brw_message_desc(
3204 devinfo, 1, (commit_enable ? 1 : 0), true));
3205
3206 brw_inst_set_sfid(devinfo, insn, sfid);
3207
3208 switch (sfid) {
3209 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3210 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3211 break;
3212 case GEN7_SFID_DATAPORT_DATA_CACHE:
3213 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3214 break;
3215 default:
3216 unreachable("Not reached");
3217 }
3218
3219 if (commit_enable)
3220 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3221 }
3222
3223 void
3224 brw_memory_fence(struct brw_codegen *p,
3225 struct brw_reg dst,
3226 enum opcode send_op)
3227 {
3228 const struct gen_device_info *devinfo = p->devinfo;
3229 const bool commit_enable =
3230 devinfo->gen >= 10 || /* HSD ES # 1404612949 */
3231 (devinfo->gen == 7 && !devinfo->is_haswell);
3232 struct brw_inst *insn;
3233
3234 brw_push_insn_state(p);
3235 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3236 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3237 dst = vec1(dst);
3238
3239    /* Set dst as destination for dependency tracking; the MEMORY_FENCE
3240     * message doesn't write anything back.
3241 */
3242 insn = next_insn(p, send_op);
3243 dst = retype(dst, BRW_REGISTER_TYPE_UW);
3244 brw_set_dest(p, insn, dst);
3245 brw_set_src0(p, insn, dst);
3246 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3247 commit_enable);
3248
3249 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3250 /* IVB does typed surface access through the render cache, so we need to
3251 * flush it too. Use a different register so both flushes can be
3252 * pipelined by the hardware.
3253 */
3254 insn = next_insn(p, send_op);
3255 brw_set_dest(p, insn, offset(dst, 1));
3256 brw_set_src0(p, insn, offset(dst, 1));
3257 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3258 commit_enable);
3259
3260 /* Now write the response of the second message into the response of the
3261 * first to trigger a pipeline stall -- This way future render and data
3262 * cache messages will be properly ordered with respect to past data and
3263 * render cache messages.
3264 */
3265 brw_MOV(p, dst, offset(dst, 1));
3266 }
3267
3268 brw_pop_insn_state(p);
3269 }
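
/* Usage sketch (scratch register number hypothetical): the fence is emitted
 * with a temporary destination whose only purpose is dependency tracking, so
 * a later instruction reading that register will stall until the fence has
 * completed:
 *
 *    brw_memory_fence(p, retype(brw_vec8_grf(126, 0), BRW_REGISTER_TYPE_UD),
 *                     BRW_OPCODE_SEND);
 */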

void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const uint16_t exec_size = brw_get_default_exec_size(p);
   const unsigned slot_group = brw_get_default_group(p) / 16;
   const unsigned simd_mode = (exec_size == BRW_EXECUTE_16);
   const unsigned desc =
      brw_message_desc(devinfo, msg_length, response_length, false) |
      brw_pixel_interp_desc(devinfo, mode, noperspective, simd_mode,
                            slot_group);

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually immediate.
    */
   brw_send_indirect_message(p,
                             GEN7_SFID_PIXEL_INTERPOLATOR,
                             dest,
                             mrf,
                             vec1(data),
                             desc);
}
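
/* Usage sketch for a SIMD8 non-perspective centroid query (register numbers
 * and message lengths hypothetical; since `data` is an immediate here,
 * brw_send_indirect_message falls back to an ordinary direct SEND):
 *
 *    brw_pixel_interpolator_query(p,
 *                                 retype(brw_vec8_grf(10, 0),
 *                                        BRW_REGISTER_TYPE_F),
 *                                 brw_vec8_grf(2, 0),
 *                                 true,
 *                                 GEN7_PIXEL_INTERPOLATOR_LOC_CENTROID,
 *                                 brw_imm_ud(0),
 *                                 1, 2);
 */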

void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_get_default_exec_size(p);
   const unsigned qtr_control = brw_get_default_group(p) / 8;
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just find
          * the first bit set in the execution mask.  The register exists on
          * HSW already but it reads back as all ones when the current
          * instruction has execution masking disabled, so it's kind of
          * useless.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's not
             * tightly packed (i.e. it doesn't have the form '2^n - 1' for
             * some n).  Combine ce0 with the given dispatch (or vector) mask
             * to mask off those channels which were never dispatched by the
             * hardware.
             */
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }

         /* Quarter control has the effect of magically shifting the value of
          * ce0 so you'll get the first active channel relative to the
          * specified quarter control as result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
      } else {
         const struct brw_reg flag = brw_flag_reg(p->current->flag_subreg / 2,
                                                  p->current->flag_subreg % 2);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking and
          * a conditional modifier enabled in order to get the full execution
          * mask in f1.0.  We could use a single 32-wide move here if it
          * weren't for the hardware bug that causes channel enables to be
          * applied incorrectly to the second half of 32-wide instructions on
          * Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }

         /* Find the first bit set in the exec_size-wide portion of the flag
          * register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.  Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination without and with execution masking to
          * find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}
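
/* Usage sketch (destination register hypothetical): with an all-ones mask
 * immediate the Gen8+ path trusts ce0 alone, which is only valid if the
 * dispatch mask is known to be tightly packed:
 *
 *    struct brw_reg chan = retype(brw_vec1_grf(8, 0), BRW_REGISTER_TYPE_UD);
 *    brw_find_live_channel(p, chan, brw_imm_ud(0xffffffff));
 *
 * Passing the actual dispatch (or vector) mask instead makes the helper AND
 * it with ce0, as described in the comments above.
 */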

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset.  The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address.  Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, so account for the difference if the source
          * register is above this limit.  E.g. for a source in g70 the byte
          * offset is 2240 = 4 * 512 + 192, so we add 2048 to the address
          * register up front and keep 192 as the immediate.
          */
         if (offset >= limit) {
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation is
             *    integer DWord multiply, indirect addressing must not be
             *    used."
             *
             * To work around this restriction, we do two integer MOVs
             * instead of one 64-bit MOV.  Because no double value should
             * ever cross a register boundary, it's safe to use the immediate
             * offset in the indirect here to handle adding 4 bytes to the
             * offset and avoid the extra ADD to the register file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_vec1_indirect(addr.subnr, offset),
                           BRW_REGISTER_TYPE_D));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_vec1_indirect(addr.subnr, offset + 4),
                           BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can be either zero or one, replicate it
          * to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}
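
/* Usage sketch (register numbers hypothetical): the usual pairing with
 * brw_find_live_channel() above to uniformize a value in Align1 mode --
 * pick the first live channel, then pull that component of the source into
 * a scalar:
 *
 *    struct brw_reg chan = retype(brw_vec1_grf(8, 0), BRW_REGISTER_TYPE_UD);
 *    brw_find_live_channel(p, chan, brw_imm_ud(0xffffffff));
 *    brw_broadcast(p, vec1(brw_vec8_grf(9, 0)), brw_vec8_grf(4, 0), chan);
 */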

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we want to just
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_desc(p, send, (brw_message_desc(devinfo, 2, 0, false) |
                          brw_dp_untyped_atomic_desc(p, BRW_AOP_ADD, false)));

   brw_inst_set_sfid(devinfo, send, sfid);
   brw_inst_set_binding_table_index(devinfo, send, surf_index);

   brw_pop_insn_state(p);
}
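
/* Usage sketch (payload register and surface index hypothetical): the
 * two-register payload carries the shader_time buffer offset in its first
 * register and the value to accumulate in the second, matching the message
 * length of 2 above:
 *
 *    brw_shader_time_add(p, brw_vec8_grf(20, 0), st_surf_index);
 */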


/**
 * Emit the SEND message for a barrier
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());
   brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}
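
/* Usage sketch: a thread group barrier pairs the two helpers above -- the
 * gateway SEND signals arrival (the payload register carrying the barrier ID
 * in its header is hypothetical), and the WAIT puts the thread to sleep
 * until the notification register is signalled:
 *
 *    brw_barrier(p, brw_vec8_grf(1, 0));
 *    brw_WAIT(p);
 */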

/**
 * Changes the floating point rounding mode by updating the control register
 * field defined at cr0.0[5-6] bits.  This function supports the changes to
 * RTNE (00), RU (01), RD (10) and RTZ (11) rounding using bitwise operations.
 * Only RTNE and RTZ rounding are enabled in NIR.
 */
void
brw_rounding_mode(struct brw_codegen *p,
                  enum brw_rnd_mode mode)
{
   const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;

   if (bits != BRW_CR0_RND_MODE_MASK) {
      brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                               brw_imm_ud(~BRW_CR0_RND_MODE_MASK));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);

      /* From the Skylake PRM, Volume 7, page 760:
       *
       *    "Implementation Restriction on Register Access: When the control
       *    register is used as an explicit source and/or destination,
       *    hardware does not ensure execution pipeline coherency.  Software
       *    must set the thread control field to 'switch' for an instruction
       *    that uses control register as an explicit operand."
       */
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }

   if (bits) {
      brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                              brw_imm_ud(bits));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }
}
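
/* Usage sketch: switch to round-to-zero around a sequence of instructions
 * that need it, then restore the default round-to-nearest-even mode:
 *
 *    brw_rounding_mode(p, BRW_RND_MODE_RTZ);
 *    ...emit the instructions that need RTZ...
 *    brw_rounding_mode(p, BRW_RND_MODE_RTNE);
 */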