intel/eu: Add has_simd4x2 bools to surface_write functions
mesa.git: src/intel/compiler/brw_eu_emit.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case. This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
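
/* Usage sketch (illustrative, not from the original file; the register
 * numbers are made up for the example): a generator that still thinks in
 * terms of MRFs can resolve the implied move before emitting a SEND.
 *
 *    struct brw_reg payload = brw_vec8_grf(12, 0);
 *    gen6_resolve_implied_move(p, &payload, 1);
 *    // On Gen6+ payload now refers to m1; on Gen4/5 it is left untouched.
 *    brw_inst *send = next_insn(p, BRW_OPCODE_SEND);
 *    brw_set_src0(p, send, payload);
 */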

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
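
/* For example (assuming GEN7_MRF_HACK_START is 112, as the R112-R127 range
 * quoted above suggests), a message register m4 would be remapped to g116 on
 * Gen7+:
 *
 *    struct brw_reg reg = brw_message_reg(4);
 *    gen7_convert_mrf_to_grf(p, &reg);
 *    // On Gen7+: reg.file == BRW_GENERAL_REGISTER_FILE, reg.nr == 116.
 */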

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file == BRW_GENERAL_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          *    Although Dst.HorzStride is a don't care for Align16, HW needs
          *    this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Even though this is ignored for DA16 destinations, the hardware
          * still requires it to be set to '01'.
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct. However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * In platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16. In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}
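
/* For example (a sketch, relying on the BRW_WIDTH_n and BRW_EXECUTE_n enums
 * sharing an encoding, as brw_reg.h defines them): with
 * p->automatic_exec_sizes set,
 *
 *    brw_MOV(p, vec1(dst), vec1(src));
 *
 * ends up as an exec-size-1 MOV even if the default exec size is 8, because
 * BRW_WIDTH_1 < BRW_EXECUTE_4 triggers the fixup above.
 */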

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file == BRW_GENERAL_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity arising from the fact that we use the same
             * region descriptions for registers in align_16 as in align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_GENERAL_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert(reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity arising from the fact that we use the same
             * region descriptions for registers in align_16 as in align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}
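
/* Note on the immediate restriction above (a sketch, not from the original
 * file): because only src1 has an immediate encoding in two-source
 * instructions, generators put the constant in src1:
 *
 *    brw_ADD(p, dst, src, brw_imm_f(1.0f));   // OK: immediate in src1
 *    // brw_ADD(p, dst, brw_imm_f(1.0f), src) would trip the src0 assert.
 */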

/**
 * Specify the descriptor and extended descriptor immediate for a SEND(C)
 * message instruction.
 */
void
brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
                unsigned desc, unsigned ex_desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC);
   brw_inst_set_src1_file_type(devinfo, inst,
                               BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_UD);
   brw_inst_set_send_desc(devinfo, inst, desc);
   if (devinfo->gen >= 9)
      brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
}
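
/* Usage sketch (illustrative): brw_set_desc(), used throughout this file, is
 * a thin wrapper around brw_set_desc_ex() that passes ex_desc = 0.  A caller
 * building a plain message descriptor might do:
 *
 *    brw_inst *send = next_insn(p, BRW_OPCODE_SEND);
 *    unsigned mlen = 2, rlen = 1;
 *    brw_set_desc(p, send, brw_message_desc(devinfo, mlen, rlen, true));
 */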

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}
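
/* The inference above is just operand counting: two-operand functions (POW,
 * the INT_DIV variants) send two registers of arguments, and functions that
 * produce two results (SINCOS, INT_DIV_QUOTIENT_AND_REMAINDER) read back two.
 * So INT_DIV_QUOTIENT_AND_REMAINDER gets msg_length = 2 and
 * response_length = 2, while a plain RSQ gets 1 and 1.
 */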


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, mlen, rlen, header_present));

   brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
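
/* The block_size field is encoded differently per generation, as the ternary
 * above computes.  For example, for num_regs == 4:
 *
 *    Gen8+: block_size = _mesa_logbase2(4) = 2
 *    Gen7:  block_size = 4 - 1             = 3
 *
 * Both encodings describe the same four-register scratch block.
 */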

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31). However, 3-src instructions
    * use 32-bit units (components 0..7). Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
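
/* For example, a source at byte subnr 16 of a register is component 4 in
 * the 3-src encoding: get_3src_subreg_nr() returns 16 / 4 = 4, i.e. the
 * fifth 32-bit channel of the GRF.
 */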

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.file != BRW_IMMEDIATE_VALUE || src0.nr < 128);
   assert(src1.file != BRW_IMMEDIATE_VALUE || src1.nr < 128);
   assert(src2.file != BRW_IMMEDIATE_VALUE || src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD);
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types. The MAD and LRP emitters ensure
          * that all four types are float. The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0)                     \
{                                                           \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);         \
}

#define ALU2(OP)                                                  \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1)                           \
{                                                                 \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);         \
}

#define ALU3(OP)                                                        \
brw_inst *brw_##OP(struct brw_codegen *p,                               \
                   struct brw_reg dest,                                 \
                   struct brw_reg src0,                                 \
                   struct brw_reg src1,                                 \
                   struct brw_reg src2)                                 \
{                                                                       \
   if (p->current->access_mode == BRW_ALIGN_16) {                       \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)                        \
         src0.swizzle = BRW_SWIZZLE_XXXX;                               \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)                        \
         src1.swizzle = BRW_SWIZZLE_XXXX;                               \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)                        \
         src2.swizzle = BRW_SWIZZLE_XXXX;                               \
   }                                                                    \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);         \
}

#define ALU3F(OP)                                                       \
brw_inst *brw_##OP(struct brw_codegen *p,                               \
                   struct brw_reg dest,                                 \
                   struct brw_reg src0,                                 \
                   struct brw_reg src1,                                 \
                   struct brw_reg src2)                                 \
{                                                                       \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                           \
          dest.type == BRW_REGISTER_TYPE_DF);                           \
   if (dest.type == BRW_REGISTER_TYPE_F) {                              \
      assert(src0.type == BRW_REGISTER_TYPE_F);                         \
      assert(src1.type == BRW_REGISTER_TYPE_F);                         \
      assert(src2.type == BRW_REGISTER_TYPE_F);                         \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {                      \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                        \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                        \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                        \
   }                                                                    \
                                                                        \
   if (p->current->access_mode == BRW_ALIGN_16) {                       \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)                        \
         src0.swizzle = BRW_SWIZZLE_XXXX;                               \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)                        \
         src1.swizzle = BRW_SWIZZLE_XXXX;                               \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)                        \
         src2.swizzle = BRW_SWIZZLE_XXXX;                               \
   }                                                                    \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);         \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register. A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                       \
void brw_##OP(struct brw_codegen *p,                                    \
              struct brw_reg dest,                                      \
              struct brw_reg src)                                       \
{                                                                       \
   const struct gen_device_info *devinfo = p->devinfo;                  \
   brw_inst *rnd, *add;                                                 \
   rnd = next_insn(p, BRW_OPCODE_##OP);                                 \
   brw_set_dest(p, rnd, dest);                                          \
   brw_set_src0(p, rnd, src);                                           \
                                                                        \
   if (devinfo->gen < 6) {                                              \
      /* turn on round-increments */                                    \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R);      \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));                    \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL);    \
   }                                                                    \
}
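
/* A sketch of what ROUND(RNDZ) expands to on pre-Gen6 hardware: brw_RNDZ()
 * emits the RNDZ with the R conditional modifier, then a predicated
 *
 *    (+f0) add dest, dest, 1.0F
 *
 * so only the channels whose "increment bit" was set get the +1.0 fixup.
 * On Gen6+ the ADD is skipped entirely.
 */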


ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use an <X,2,0> source region to
    * read each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == src0.width + src0.hstride);
      src0.vstride = src0.hstride;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
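
/* A concrete instance of the region rewrite above (enum arithmetic; the
 * BRW_VERTICAL_STRIDE_n / BRW_WIDTH_n / BRW_HORIZONTAL_STRIDE_n encodings
 * make "vstride == width + hstride" hold for contiguous regions): a standard
 * <8;8,1>:F source becomes <1;2,0>:F, so each float is read twice and the
 * odd-channel data the hardware ignores is just a duplicate.
 */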

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV. Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}
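
/* The zero-fill path above works by viewing the UD destination as a strided
 * W region: the conversion writes the low words, then a second MOV writes 0
 * to the odd (high) words via suboffset(dst, 1).  The no_dd_clear /
 * no_dd_check pair in effect tells the hardware the two writes touch
 * disjoint bytes, so the second MOV need not stall on the first.
 */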

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *    Because this instruction does not have a 16-bit floating-point
       *    type, the source data type must be Word (W). The destination type
       *    must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}


/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* The EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack). Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, e.g. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off. If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}
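
/* Typical emission pattern (a sketch): the jump fields written as 0 here are
 * fixed up later by brw_ENDIF() via patch_IF_ELSE().
 *
 *    brw_IF(p, BRW_EXECUTE_8);
 *    ...emit the then-block...
 *    brw_ELSE(p);
 *    ...emit the else-block...
 *    brw_ENDIF(p);   // pops the stack and patches IF/ELSE jump targets
 */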

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier). It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block. If there is no ELSE
    * block, point to where ENDIF would be. Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}
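
/* Worked example (the instruction indices are invented): with the IF at
 * store index 10 and the ELSE at index 14, the IF becomes a
 * predicate-inverted "add ip, ip, imm" with imm = (14 - 10 + 1) * 16 = 80
 * bytes, landing on the first instruction after the ELSE; each EU
 * instruction is 16 bytes here.
 */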

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions."). And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs. So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}
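
/* The br factor converts an instruction-count delta into the unit each
 * generation's jump fields use.  A sketch of brw_jump_scale() as brw_eu.h
 * defines it in this codebase (quoted from memory, so treat as illustrative):
 * Gen8+ counts bytes (scale 16), Gen5-7 count 64-bit compaction chunks
 * (scale 2), and Gen4 counts whole 128-bit instructions (scale 1).
 */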

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP. On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions."). And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs. So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /*
    * A single next_insn() may change the base address of the instruction
    * store memory (p->store), so call it first, before referencing the
    * instruction store pointer via an index.
    */
   if (emit_endif)
      insn = next_insn(p, BRW_OPCODE_ENDIF);

   /* Pop the IF and (optional) ELSE instructions from the stack */
   p->if_depth_in_loop[p->loop_stack_depth]--;
   tmp = pop_if_stack(p);
   if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
      else_inst = tmp;
      tmp = pop_if_stack(p);
   }
   if_inst = tmp;

   if (!emit_endif) {
      /* ENDIF is useless; don't bother emitting it. */
      convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
      return;
   }

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
   } else {
      brw_set_src0(p, insn, brw_imm_d(0));
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   /* Also pop item off the stack in the endif instruction: */
   if (devinfo->gen < 6) {
      brw_inst_set_gen4_jump_count(devinfo, insn, 0);
      brw_inst_set_gen4_pop_count(devinfo, insn, 1);
   } else if (devinfo->gen == 6) {
      brw_inst_set_gen6_jump_count(devinfo, insn, 2);
   } else {
      brw_inst_set_jip(devinfo, insn, 2);
   }
   patch_IF_ELSE(p, if_inst, else_inst, insn);
}

brw_inst *
brw_BREAK(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_BREAK);
   if (devinfo->gen >= 8) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen >= 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));

   return insn;
}

brw_inst *
brw_CONT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_CONTINUE);
   brw_set_dest(p, insn, brw_ip_reg());
   if (devinfo->gen >= 8) {
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   }

   if (devinfo->gen < 6) {
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   return insn;
}
1600
1601 brw_inst *
1602 gen6_HALT(struct brw_codegen *p)
1603 {
1604 const struct gen_device_info *devinfo = p->devinfo;
1605 brw_inst *insn;
1606
1607 insn = next_insn(p, BRW_OPCODE_HALT);
1608 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1609 if (devinfo->gen >= 8) {
1610 brw_set_src0(p, insn, brw_imm_d(0x0));
1611 } else {
1612 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1613 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1614 }
1615
1616 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1617 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1618 return insn;
1619 }
1620
1621 /* DO/WHILE loop:
1622 *
1623 * The DO/WHILE is just an unterminated loop -- break or continue are
1624  * used for control within the loop.  There are a few ways it can be
1625  * implemented, depending on the generation:
1626 *
1627 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1628 * jip and no DO instruction.
1629 *
1630 * For non-uniform control flow pre-gen6, there's a DO instruction to
1631 * push the mask, and a WHILE to jump back, and BREAK to get out and
1632 * pop the mask.
1633 *
1634 * For gen6, there's no more mask stack, so no need for DO. WHILE
1635 * just points back to the first instruction of the loop.
1636 */
1637 brw_inst *
1638 brw_DO(struct brw_codegen *p, unsigned execute_size)
1639 {
1640 const struct gen_device_info *devinfo = p->devinfo;
1641
1642 if (devinfo->gen >= 6 || p->single_program_flow) {
1643 push_loop_stack(p, &p->store[p->nr_insn]);
1644 return &p->store[p->nr_insn];
1645 } else {
1646 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1647
1648 push_loop_stack(p, insn);
1649
1650 /* Override the defaults for this instruction:
1651 */
1652 brw_set_dest(p, insn, brw_null_reg());
1653 brw_set_src0(p, insn, brw_null_reg());
1654 brw_set_src1(p, insn, brw_null_reg());
1655
1656 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1657 brw_inst_set_exec_size(devinfo, insn, execute_size);
1658 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1659
1660 return insn;
1661 }
1662 }
1663
1664 /**
1665 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1666 * instruction here.
1667 *
1668 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1669 * nesting, since it can always just point to the end of the block/current loop.
1670 */
1671 static void
1672 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1673 {
1674 const struct gen_device_info *devinfo = p->devinfo;
1675 brw_inst *do_inst = get_inner_do_insn(p);
1676 brw_inst *inst;
1677 unsigned br = brw_jump_scale(devinfo);
1678
1679 assert(devinfo->gen < 6);
1680
1681 for (inst = while_inst - 1; inst != do_inst; inst--) {
1682       /* If the jump count is nonzero, this instruction has already been
1683        * patched because it's part of a loop nested inside the one we're
1684        * patching.
1685 */
1686 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1687 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1688 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1689 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1690 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1691 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1692 }
1693 }
1694 }
1695
1696 brw_inst *
1697 brw_WHILE(struct brw_codegen *p)
1698 {
1699 const struct gen_device_info *devinfo = p->devinfo;
1700 brw_inst *insn, *do_insn;
1701 unsigned br = brw_jump_scale(devinfo);
1702
1703 if (devinfo->gen >= 6) {
1704 insn = next_insn(p, BRW_OPCODE_WHILE);
1705 do_insn = get_inner_do_insn(p);
1706
1707 if (devinfo->gen >= 8) {
1708 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1709 brw_set_src0(p, insn, brw_imm_d(0));
1710 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1711 } else if (devinfo->gen == 7) {
1712 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1713 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1714 brw_set_src1(p, insn, brw_imm_w(0));
1715 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1716 } else {
1717 brw_set_dest(p, insn, brw_imm_w(0));
1718 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1719 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1720 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1721 }
1722
1723 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1724
1725 } else {
1726 if (p->single_program_flow) {
1727 insn = next_insn(p, BRW_OPCODE_ADD);
1728 do_insn = get_inner_do_insn(p);
1729
1730 brw_set_dest(p, insn, brw_ip_reg());
1731 brw_set_src0(p, insn, brw_ip_reg());
1732 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1733 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1734 } else {
1735 insn = next_insn(p, BRW_OPCODE_WHILE);
1736 do_insn = get_inner_do_insn(p);
1737
1738 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1739
1740 brw_set_dest(p, insn, brw_ip_reg());
1741 brw_set_src0(p, insn, brw_ip_reg());
1742 brw_set_src1(p, insn, brw_imm_d(0));
1743
1744 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1745 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1746 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1747
1748 brw_patch_break_cont(p, insn);
1749 }
1750 }
1751 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1752
1753 p->loop_stack_depth--;
1754
1755 return insn;
1756 }
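
/* Illustrative sketch, not part of the original file: a predicated loop
 * emitted with the functions above.  On pre-gen6, brw_WHILE() also
 * back-patches the jump counts of any BREAK/CONT emitted in the body via
 * brw_patch_break_cont().
 */
static inline void
example_emit_loop(struct brw_codegen *p)
{
   brw_DO(p, BRW_EXECUTE_8);                 /* pushes the loop stack */
   /* ... loop body, including a CMP that sets f0 ... */
   brw_set_default_predicate_control(p, BRW_PREDICATE_NORMAL);
   brw_BREAK(p);                             /* exit channels with f0 set */
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   brw_WHILE(p);                             /* jump back to the loop top */
}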
1757
1758 /* FORWARD JUMPS:
1759 */
1760 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1761 {
1762 const struct gen_device_info *devinfo = p->devinfo;
1763 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1764 unsigned jmpi = 1;
1765
1766 if (devinfo->gen >= 5)
1767 jmpi = 2;
1768
1769 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1770 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1771
1772 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1773 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1774 }
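
/* Illustrative sketch, not part of the original file (and assuming the
 * standard brw_JMPI() helper from brw_eu.h): remember the index of a
 * forward JMPI, emit the code to be skipped, then land the jump at the
 * current end of the program.
 */
static inline void
example_emit_fwd_jump(struct brw_codegen *p)
{
   const int jmp_idx = p->nr_insn;           /* index of the JMPI below */
   brw_JMPI(p, brw_imm_d(0), BRW_PREDICATE_NORMAL);
   /* ... instructions to skip when the predicate passes ... */
   brw_land_fwd_jump(p, jmp_idx);            /* patch the jump target */
}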
1775
1776 /* To integrate with the above, it makes sense that the comparison
1777 * instruction should populate the flag register. It might be simpler
1778 * just to use the flag reg for most WM tasks?
1779 */
1780 void brw_CMP(struct brw_codegen *p,
1781 struct brw_reg dest,
1782 unsigned conditional,
1783 struct brw_reg src0,
1784 struct brw_reg src1)
1785 {
1786 const struct gen_device_info *devinfo = p->devinfo;
1787 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1788
1789 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1790 brw_set_dest(p, insn, dest);
1791 brw_set_src0(p, insn, src0);
1792 brw_set_src1(p, insn, src1);
1793
1794 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1795 * page says:
1796 * "Any CMP instruction with a null destination must use a {switch}."
1797 *
1798 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1799 * mentioned on their work-arounds pages.
1800 */
1801 if (devinfo->gen == 7) {
1802 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1803 dest.nr == BRW_ARF_NULL) {
1804 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1805 }
1806 }
1807 }
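
/* Illustrative sketch, not part of the original file: a per-channel
 * "a < b" comparison into the flag register with a null destination,
 * which is exactly the case the Gen7 {switch} workaround above covers.
 */
static inline void
example_cmp_less(struct brw_codegen *p, struct brw_reg a, struct brw_reg b)
{
   brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_F),
           BRW_CONDITIONAL_L, a, b);
}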
1808
1809 /***********************************************************************
1810 * Helpers for the various SEND message types:
1811 */
1812
1813 /** Extended math function, float[8].
1814 */
1815 void gen4_math(struct brw_codegen *p,
1816 struct brw_reg dest,
1817 unsigned function,
1818 unsigned msg_reg_nr,
1819 struct brw_reg src,
1820 unsigned precision )
1821 {
1822 const struct gen_device_info *devinfo = p->devinfo;
1823 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1824 unsigned data_type;
1825 if (has_scalar_region(src)) {
1826 data_type = BRW_MATH_DATA_SCALAR;
1827 } else {
1828 data_type = BRW_MATH_DATA_VECTOR;
1829 }
1830
1831 assert(devinfo->gen < 6);
1832
1833 /* Example code doesn't set predicate_control for send
1834 * instructions.
1835 */
1836 brw_inst_set_pred_control(devinfo, insn, 0);
1837 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1838
1839 brw_set_dest(p, insn, dest);
1840 brw_set_src0(p, insn, src);
1841 brw_set_math_message(p,
1842 insn,
1843 function,
1844 src.type == BRW_REGISTER_TYPE_D,
1845 precision,
1846 data_type);
1847 }
1848
1849 void gen6_math(struct brw_codegen *p,
1850 struct brw_reg dest,
1851 unsigned function,
1852 struct brw_reg src0,
1853 struct brw_reg src1)
1854 {
1855 const struct gen_device_info *devinfo = p->devinfo;
1856 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1857
1858 assert(devinfo->gen >= 6);
1859
1860 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1861 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1862
1863 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1864 if (devinfo->gen == 6) {
1865 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1866 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1867 }
1868
1869 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1870 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1871 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1872 assert(src0.type != BRW_REGISTER_TYPE_F);
1873 assert(src1.type != BRW_REGISTER_TYPE_F);
1874 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1875 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1876 } else {
1877 assert(src0.type == BRW_REGISTER_TYPE_F);
1878 assert(src1.type == BRW_REGISTER_TYPE_F);
1879 }
1880
1881 /* Source modifiers are ignored for extended math instructions on Gen6. */
1882 if (devinfo->gen == 6) {
1883 assert(!src0.negate);
1884 assert(!src0.abs);
1885 assert(!src1.negate);
1886 assert(!src1.abs);
1887 }
1888
1889 brw_inst_set_math_function(devinfo, insn, function);
1890
1891 brw_set_dest(p, insn, dest);
1892 brw_set_src0(p, insn, src0);
1893 brw_set_src1(p, insn, src1);
1894 }
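
/* Illustrative sketch, not part of the original file (register numbers are
 * hypothetical): a reciprocal on Gen6+.  INV is a single-source function,
 * so src1 is the null register, which conveniently already has float type.
 */
static inline void
example_emit_rcp(struct brw_codegen *p)
{
   gen6_math(p, brw_vec8_grf(1, 0),          /* dst:  g1 (must be a GRF) */
             BRW_MATH_FUNCTION_INV,
             brw_vec8_grf(2, 0),             /* src0: g2, float */
             brw_null_reg());                /* src1: unused */
}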
1895
1896 /**
1897 * Return the right surface index to access the thread scratch space using
1898 * stateless dataport messages.
1899 */
1900 unsigned
1901 brw_scratch_surface_idx(const struct brw_codegen *p)
1902 {
1903 /* The scratch space is thread-local so IA coherency is unnecessary. */
1904 if (p->devinfo->gen >= 8)
1905 return GEN8_BTI_STATELESS_NON_COHERENT;
1906 else
1907 return BRW_BTI_STATELESS;
1908 }
1909
1910 /**
1911  * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1912 * using a constant offset per channel.
1913 *
1914 * The offset must be aligned to oword size (16 bytes). Used for
1915 * register spilling.
1916 */
1917 void brw_oword_block_write_scratch(struct brw_codegen *p,
1918 struct brw_reg mrf,
1919 int num_regs,
1920 unsigned offset)
1921 {
1922 const struct gen_device_info *devinfo = p->devinfo;
1923 const unsigned target_cache =
1924 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1925 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1926 BRW_SFID_DATAPORT_WRITE);
1927 uint32_t msg_type;
1928
1929 if (devinfo->gen >= 6)
1930 offset /= 16;
1931
1932 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1933
1934 const unsigned mlen = 1 + num_regs;
1935
1936 /* Set up the message header. This is g0, with g0.2 filled with
1937 * the offset. We don't want to leave our offset around in g0 or
1938 * it'll screw up texture samples, so set it up inside the message
1939 * reg.
1940 */
1941 {
1942 brw_push_insn_state(p);
1943 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1944 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1945 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1946
1947 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1948
1949 /* set message header global offset field (reg 0, element 2) */
1950 brw_set_default_exec_size(p, BRW_EXECUTE_1);
1951 brw_MOV(p,
1952 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
1953 mrf.nr,
1954 2), BRW_REGISTER_TYPE_UD),
1955 brw_imm_ud(offset));
1956
1957 brw_pop_insn_state(p);
1958 }
1959
1960 {
1961 struct brw_reg dest;
1962 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1963 int send_commit_msg;
1964 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
1965 BRW_REGISTER_TYPE_UW);
1966
1967 brw_inst_set_sfid(devinfo, insn, target_cache);
1968 brw_inst_set_compression(devinfo, insn, false);
1969
1970 if (brw_inst_exec_size(devinfo, insn) >= 16)
1971 src_header = vec16(src_header);
1972
1973 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1974 if (devinfo->gen < 6)
1975 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
1976
1977 /* Until gen6, writes followed by reads from the same location
1978 * are not guaranteed to be ordered unless write_commit is set.
1979 * If set, then a no-op write is issued to the destination
1980 * register to set a dependency, and a read from the destination
1981 * can be used to ensure the ordering.
1982 *
1983 * For gen6, only writes between different threads need ordering
1984 * protection. Our use of DP writes is all about register
1985 * spilling within a thread.
1986 */
1987 if (devinfo->gen >= 6) {
1988 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
1989 send_commit_msg = 0;
1990 } else {
1991 dest = src_header;
1992 send_commit_msg = 1;
1993 }
1994
1995 brw_set_dest(p, insn, dest);
1996 if (devinfo->gen >= 6) {
1997 brw_set_src0(p, insn, mrf);
1998 } else {
1999 brw_set_src0(p, insn, brw_null_reg());
2000 }
2001
2002 if (devinfo->gen >= 6)
2003 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2004 else
2005 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2006
2007 brw_set_desc(p, insn,
2008 brw_message_desc(devinfo, mlen, send_commit_msg, true) |
2009 brw_dp_write_desc(devinfo, brw_scratch_surface_idx(p),
2010 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2011 msg_type, 0, /* not a render target */
2012 send_commit_msg));
2013 }
2014 }
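
/* Illustrative sketch, not part of the original file (register choices are
 * hypothetical): spill two GRFs to scratch at byte offset 64 and fill them
 * back.  The caller is assumed to have MOVed the data into m2..m3 already;
 * m1 itself is overwritten with the message header above.
 */
static inline void
example_spill_fill(struct brw_codegen *p, struct brw_reg tmp)
{
   brw_oword_block_write_scratch(p, brw_message_reg(1), 2, 64);
   brw_oword_block_read_scratch(p, tmp, brw_message_reg(1), 2, 64);
}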
2015
2016
2017 /**
2018  * Read a block of OWORDs (half a GRF each) from the scratch buffer,
2019  * using a constant offset per channel.
2020 *
2021 * Offset must be aligned to oword size (16 bytes). Used for register
2022 * spilling.
2023 */
2024 void
2025 brw_oword_block_read_scratch(struct brw_codegen *p,
2026 struct brw_reg dest,
2027 struct brw_reg mrf,
2028 int num_regs,
2029 unsigned offset)
2030 {
2031 const struct gen_device_info *devinfo = p->devinfo;
2032
2033 if (devinfo->gen >= 6)
2034 offset /= 16;
2035
2036 if (p->devinfo->gen >= 7) {
2037 /* On gen 7 and above, we no longer have message registers and we can
2038 * send from any register we want. By using the destination register
2039 * for the message, we guarantee that the implied message write won't
2040 * accidentally overwrite anything. This has been a problem because
2041 * the MRF registers and source for the final FB write are both fixed
2042 * and may overlap.
2043 */
2044 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2045 } else {
2046 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2047 }
2048 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2049
2050 const unsigned rlen = num_regs;
2051 const unsigned target_cache =
2052 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2053 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2054 BRW_SFID_DATAPORT_READ);
2055
2056 {
2057 brw_push_insn_state(p);
2058 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2059 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2060 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2061
2062 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2063
2064 /* set message header global offset field (reg 0, element 2) */
2065 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2066 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2067
2068 brw_pop_insn_state(p);
2069 }
2070
2071 {
2072 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2073
2074 brw_inst_set_sfid(devinfo, insn, target_cache);
2075 assert(brw_inst_pred_control(devinfo, insn) == 0);
2076 brw_inst_set_compression(devinfo, insn, false);
2077
2078 brw_set_dest(p, insn, dest); /* UW? */
2079 if (devinfo->gen >= 6) {
2080 brw_set_src0(p, insn, mrf);
2081 } else {
2082 brw_set_src0(p, insn, brw_null_reg());
2083 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2084 }
2085
2086 brw_set_desc(p, insn,
2087 brw_message_desc(devinfo, 1, rlen, true) |
2088 brw_dp_read_desc(devinfo, brw_scratch_surface_idx(p),
2089 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2090 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2091 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2092 }
2093 }
2094
2095 void
2096 gen7_block_read_scratch(struct brw_codegen *p,
2097 struct brw_reg dest,
2098 int num_regs,
2099 unsigned offset)
2100 {
2101 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2102 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2103
2104 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2105
2106 /* The HW requires that the header is present; this is to get the g0.5
2107 * scratch offset.
2108 */
2109 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2110
2111 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2112 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2113 * is 32 bytes, which happens to be the size of a register.
2114 */
2115 offset /= REG_SIZE;
2116 assert(offset < (1 << 12));
2117
2118 gen7_set_dp_scratch_message(p, insn,
2119 false, /* scratch read */
2120 false, /* OWords */
2121 false, /* invalidate after read */
2122 num_regs,
2123 offset,
2124 1, /* mlen: just g0 */
2125 num_regs, /* rlen */
2126 true); /* header present */
2127 }
2128
2129 /**
2130 * Read float[4] vectors from the data port constant cache.
2131 * Location (in buffer) should be a multiple of 16.
2132 * Used for fetching shader constants.
2133 */
2134 void brw_oword_block_read(struct brw_codegen *p,
2135 struct brw_reg dest,
2136 struct brw_reg mrf,
2137 uint32_t offset,
2138 uint32_t bind_table_index)
2139 {
2140 const struct gen_device_info *devinfo = p->devinfo;
2141 const unsigned target_cache =
2142 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2143 BRW_SFID_DATAPORT_READ);
2144 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2145
2146    /* On Gen6 and later, the offset is in units of OWORDs (16 bytes). */
2147 if (devinfo->gen >= 6)
2148 offset /= 16;
2149
2150 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2151
2152 brw_push_insn_state(p);
2153 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2154 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2155 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2156
2157 brw_push_insn_state(p);
2158 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2159 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2160
2161 /* set message header global offset field (reg 0, element 2) */
2162 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2163 brw_MOV(p,
2164 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2165 mrf.nr,
2166 2), BRW_REGISTER_TYPE_UD),
2167 brw_imm_ud(offset));
2168 brw_pop_insn_state(p);
2169
2170 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2171
2172 brw_inst_set_sfid(devinfo, insn, target_cache);
2173
2174 /* cast dest to a uword[8] vector */
2175 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2176
2177 brw_set_dest(p, insn, dest);
2178 if (devinfo->gen >= 6) {
2179 brw_set_src0(p, insn, mrf);
2180 } else {
2181 brw_set_src0(p, insn, brw_null_reg());
2182 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2183 }
2184
2185 brw_set_desc(p, insn,
2186 brw_message_desc(devinfo, 1, DIV_ROUND_UP(exec_size, 8), true) |
2187 brw_dp_read_desc(devinfo, bind_table_index,
2188 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2189 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2190 BRW_DATAPORT_READ_TARGET_DATA_CACHE));
2191
2192 brw_pop_insn_state(p);
2193 }
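
/* Illustrative sketch, not part of the original file (indices are
 * hypothetical): fetch one vec4 of constants from binding table entry 3
 * at byte offset 32 into g4, with m1 carrying the header on parts that
 * still have MRFs.
 */
static inline void
example_fetch_constant(struct brw_codegen *p)
{
   brw_oword_block_read(p, brw_vec8_grf(4, 0), brw_message_reg(1),
                        32 /* offset */, 3 /* bind_table_index */);
}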
2194
2195 brw_inst *
2196 brw_fb_WRITE(struct brw_codegen *p,
2197 struct brw_reg payload,
2198 struct brw_reg implied_header,
2199 unsigned msg_control,
2200 unsigned binding_table_index,
2201 unsigned msg_length,
2202 unsigned response_length,
2203 bool eot,
2204 bool last_render_target,
2205 bool header_present)
2206 {
2207 const struct gen_device_info *devinfo = p->devinfo;
2208 const unsigned target_cache =
2209 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2210 BRW_SFID_DATAPORT_WRITE);
2211 brw_inst *insn;
2212 unsigned msg_type;
2213 struct brw_reg dest, src0;
2214
2215 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2216 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2217 else
2218 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2219
2220 if (devinfo->gen >= 6) {
2221 insn = next_insn(p, BRW_OPCODE_SENDC);
2222 } else {
2223 insn = next_insn(p, BRW_OPCODE_SEND);
2224 }
2225 brw_inst_set_sfid(devinfo, insn, target_cache);
2226 brw_inst_set_compression(devinfo, insn, false);
2227
2228 if (devinfo->gen >= 6) {
2229 /* headerless version, just submit color payload */
2230 src0 = payload;
2231
2232 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2233 } else {
2234 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2235 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2236 src0 = implied_header;
2237
2238 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2239 }
2240
2241 brw_set_dest(p, insn, dest);
2242 brw_set_src0(p, insn, src0);
2243 brw_set_desc(p, insn,
2244 brw_message_desc(devinfo, msg_length, response_length,
2245 header_present) |
2246 brw_dp_write_desc(devinfo, binding_table_index, msg_control,
2247 msg_type, last_render_target,
2248 0 /* send_commit_msg */));
2249 brw_inst_set_eot(devinfo, insn, eot);
2250
2251 return insn;
2252 }
2253
2254 brw_inst *
2255 gen9_fb_READ(struct brw_codegen *p,
2256 struct brw_reg dst,
2257 struct brw_reg payload,
2258 unsigned binding_table_index,
2259 unsigned msg_length,
2260 unsigned response_length,
2261 bool per_sample)
2262 {
2263 const struct gen_device_info *devinfo = p->devinfo;
2264 assert(devinfo->gen >= 9);
2265 const unsigned msg_subtype =
2266 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2267 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2268
2269 brw_inst_set_sfid(devinfo, insn, GEN6_SFID_DATAPORT_RENDER_CACHE);
2270 brw_set_dest(p, insn, dst);
2271 brw_set_src0(p, insn, payload);
2272 brw_set_desc(
2273 p, insn,
2274 brw_message_desc(devinfo, msg_length, response_length, true) |
2275 brw_dp_read_desc(devinfo, binding_table_index,
2276 per_sample << 5 | msg_subtype,
2277 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2278 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2279 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2280
2281 return insn;
2282 }
2283
2284 /**
2285 * Texture sample instruction.
2286 * Note: the msg_type plus msg_length values determine exactly what kind
2287 * of sampling operation is performed. See volume 4, page 161 of docs.
2288 */
2289 void brw_SAMPLE(struct brw_codegen *p,
2290 struct brw_reg dest,
2291 unsigned msg_reg_nr,
2292 struct brw_reg src0,
2293 unsigned binding_table_index,
2294 unsigned sampler,
2295 unsigned msg_type,
2296 unsigned response_length,
2297 unsigned msg_length,
2298 unsigned header_present,
2299 unsigned simd_mode,
2300 unsigned return_format)
2301 {
2302 const struct gen_device_info *devinfo = p->devinfo;
2303 brw_inst *insn;
2304
2305 if (msg_reg_nr != -1)
2306 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2307
2308 insn = next_insn(p, BRW_OPCODE_SEND);
2309 brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
2310 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2311
2312 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2313 *
2314 * "Instruction compression is not allowed for this instruction (that
2315 * is, send). The hardware behavior is undefined if this instruction is
2316 * set as compressed. However, compress control can be set to "SecHalf"
2317 * to affect the EMask generation."
2318 *
2319 * No similar wording is found in later PRMs, but there are examples
2320 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2321 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2322 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2323 */
2324 brw_inst_set_compression(devinfo, insn, false);
2325
2326 if (devinfo->gen < 6)
2327 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2328
2329 brw_set_dest(p, insn, dest);
2330 brw_set_src0(p, insn, src0);
2331 brw_set_desc(p, insn,
2332 brw_message_desc(devinfo, msg_length, response_length,
2333 header_present) |
2334 brw_sampler_desc(devinfo, binding_table_index, sampler,
2335 msg_type, simd_mode, return_format));
2336 }
2337
2338 /* Adjust the message header's sampler state pointer to
2339 * select the correct group of 16 samplers.
2340 */
2341 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2342 struct brw_reg header,
2343 struct brw_reg sampler_index)
2344 {
2345 /* The "Sampler Index" field can only store values between 0 and 15.
2346 * However, we can add an offset to the "Sampler State Pointer"
2347 * field, effectively selecting a different set of 16 samplers.
2348 *
2349 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2350     * offset, and each sampler state is only 16 bytes, so we can't
2351 * exclusively use the offset - we have to use both.
2352 */
2353
2354 const struct gen_device_info *devinfo = p->devinfo;
2355
2356 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2357 const int sampler_state_size = 16; /* 16 bytes */
2358 uint32_t sampler = sampler_index.ud;
2359
2360 if (sampler >= 16) {
2361 assert(devinfo->is_haswell || devinfo->gen >= 8);
2362 brw_ADD(p,
2363 get_element_ud(header, 3),
2364 get_element_ud(brw_vec8_grf(0, 0), 3),
2365 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2366 }
2367 } else {
2368 /* Non-const sampler array indexing case */
2369 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2370 return;
2371 }
2372
2373 struct brw_reg temp = get_element_ud(header, 3);
2374
2375 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2376 brw_SHL(p, temp, temp, brw_imm_ud(4));
2377 brw_ADD(p,
2378 get_element_ud(header, 3),
2379 get_element_ud(brw_vec8_grf(0, 0), 3),
2380 temp);
2381 }
2382 }
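
/* Worked example for the immediate case above (illustrative): a
 * sampler_index of 20 adds 16 * (20 / 16) * 16 = 256 bytes to the
 * "Sampler State Pointer", skipping one group of sixteen 16-byte sampler
 * states; the descriptor's 4-bit "Sampler Index" field then selects
 * 20 % 16 = 4 within that group.
 */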
2383
2384 /* All these variables are pretty confusing - we might be better off
2385 * using bitmasks and macros for this, in the old style. Or perhaps
2386 * just having the caller instantiate the fields in dword3 itself.
2387 */
2388 void brw_urb_WRITE(struct brw_codegen *p,
2389 struct brw_reg dest,
2390 unsigned msg_reg_nr,
2391 struct brw_reg src0,
2392 enum brw_urb_write_flags flags,
2393 unsigned msg_length,
2394 unsigned response_length,
2395 unsigned offset,
2396 unsigned swizzle)
2397 {
2398 const struct gen_device_info *devinfo = p->devinfo;
2399 brw_inst *insn;
2400
2401 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2402
2403 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2404 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2405 brw_push_insn_state(p);
2406 brw_set_default_access_mode(p, BRW_ALIGN_1);
2407 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2408 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2409 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2410 BRW_REGISTER_TYPE_UD),
2411 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2412 brw_imm_ud(0xff00));
2413 brw_pop_insn_state(p);
2414 }
2415
2416 insn = next_insn(p, BRW_OPCODE_SEND);
2417
2418 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2419
2420 brw_set_dest(p, insn, dest);
2421 brw_set_src0(p, insn, src0);
2422 brw_set_src1(p, insn, brw_imm_d(0));
2423
2424 if (devinfo->gen < 6)
2425 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2426
2427 brw_set_urb_message(p,
2428 insn,
2429 flags,
2430 msg_length,
2431 response_length,
2432 offset,
2433 swizzle);
2434 }
2435
2436 void
2437 brw_send_indirect_message(struct brw_codegen *p,
2438 unsigned sfid,
2439 struct brw_reg dst,
2440 struct brw_reg payload,
2441 struct brw_reg desc,
2442 unsigned desc_imm)
2443 {
2444 const struct gen_device_info *devinfo = p->devinfo;
2445 struct brw_inst *send;
2446
2447 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2448
2449 assert(desc.type == BRW_REGISTER_TYPE_UD);
2450
2451 if (desc.file == BRW_IMMEDIATE_VALUE) {
2452 send = next_insn(p, BRW_OPCODE_SEND);
2453 brw_set_desc(p, send, desc.ud | desc_imm);
2454
2455 } else {
2456 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2457
2458 brw_push_insn_state(p);
2459 brw_set_default_access_mode(p, BRW_ALIGN_1);
2460 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2461 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2462 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2463
2464 /* Load the indirect descriptor to an address register using OR so the
2465 * caller can specify additional descriptor bits with the desc_imm
2466 * immediate.
2467 */
2468 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2469
2470 brw_pop_insn_state(p);
2471
2472 send = next_insn(p, BRW_OPCODE_SEND);
2473 brw_set_src1(p, send, addr);
2474 }
2475
2476 brw_set_dest(p, send, dst);
2477 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2478 brw_inst_set_sfid(devinfo, send, sfid);
2479 }
2480
2481 static void
2482 brw_send_indirect_surface_message(struct brw_codegen *p,
2483 unsigned sfid,
2484 struct brw_reg dst,
2485 struct brw_reg payload,
2486 struct brw_reg surface,
2487 unsigned desc_imm)
2488 {
2489 if (surface.file != BRW_IMMEDIATE_VALUE) {
2490 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2491
2492 brw_push_insn_state(p);
2493 brw_set_default_access_mode(p, BRW_ALIGN_1);
2494 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2495 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2496 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2497
2498 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2499 * some surface array is accessed out of bounds.
2500 */
2501 brw_AND(p, addr,
2502 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2503 BRW_GET_SWZ(surface.swizzle, 0)),
2504 brw_imm_ud(0xff));
2505
2506 brw_pop_insn_state(p);
2507
2508 surface = addr;
2509 }
2510
2511 brw_send_indirect_message(p, sfid, dst, payload, surface, desc_imm);
2512 }
2513
2514 static bool
2515 while_jumps_before_offset(const struct gen_device_info *devinfo,
2516 brw_inst *insn, int while_offset, int start_offset)
2517 {
2518 int scale = 16 / brw_jump_scale(devinfo);
2519 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2520 : brw_inst_jip(devinfo, insn);
2521 assert(jip < 0);
2522 return while_offset + jip * scale <= start_offset;
2523 }
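
/* Worked example (illustrative): on Gen7, brw_jump_scale() is 2, so
 * scale = 16 / 2 = 8 bytes per JIP unit.  A WHILE with JIP == -6 targets
 * the instruction 6 * 8 = 48 bytes (three instructions) above itself, so
 * it "jumps before" any start_offset at or beyond that target.
 */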
2524
2525
2526 static int
2527 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2528 {
2529 int offset;
2530 void *store = p->store;
2531 const struct gen_device_info *devinfo = p->devinfo;
2532
2533 int depth = 0;
2534
2535 for (offset = next_offset(devinfo, store, start_offset);
2536 offset < p->next_insn_offset;
2537 offset = next_offset(devinfo, store, offset)) {
2538 brw_inst *insn = store + offset;
2539
2540 switch (brw_inst_opcode(devinfo, insn)) {
2541 case BRW_OPCODE_IF:
2542 depth++;
2543 break;
2544 case BRW_OPCODE_ENDIF:
2545 if (depth == 0)
2546 return offset;
2547 depth--;
2548 break;
2549 case BRW_OPCODE_WHILE:
2550 /* If the while doesn't jump before our instruction, it's the end
2551 * of a sibling do...while loop. Ignore it.
2552 */
2553 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2554 continue;
2555 /* fallthrough */
2556 case BRW_OPCODE_ELSE:
2557 case BRW_OPCODE_HALT:
2558 if (depth == 0)
2559 return offset;
2560 }
2561 }
2562
2563 return 0;
2564 }
2565
2566 /* There is no DO instruction on gen6, so to find the end of the loop
2567 * we have to see if the loop is jumping back before our start
2568 * instruction.
2569 */
2570 static int
2571 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2572 {
2573 const struct gen_device_info *devinfo = p->devinfo;
2574 int offset;
2575 void *store = p->store;
2576
2577 assert(devinfo->gen >= 6);
2578
2579 /* Always start after the instruction (such as a WHILE) we're trying to fix
2580 * up.
2581 */
2582 for (offset = next_offset(devinfo, store, start_offset);
2583 offset < p->next_insn_offset;
2584 offset = next_offset(devinfo, store, offset)) {
2585 brw_inst *insn = store + offset;
2586
2587 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2588 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2589 return offset;
2590 }
2591 }
2592 assert(!"not reached");
2593 return start_offset;
2594 }
2595
2596 /* After program generation, go back and update the UIP and JIP of
2597 * BREAK, CONT, and HALT instructions to their correct locations.
2598 */
2599 void
2600 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2601 {
2602 const struct gen_device_info *devinfo = p->devinfo;
2603 int offset;
2604 int br = brw_jump_scale(devinfo);
2605 int scale = 16 / br;
2606 void *store = p->store;
2607
2608 if (devinfo->gen < 6)
2609 return;
2610
2611 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2612 brw_inst *insn = store + offset;
2613 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2614
2615 int block_end_offset = brw_find_next_block_end(p, offset);
2616 switch (brw_inst_opcode(devinfo, insn)) {
2617 case BRW_OPCODE_BREAK:
2618 assert(block_end_offset != 0);
2619 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2620 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2621 brw_inst_set_uip(devinfo, insn,
2622 (brw_find_loop_end(p, offset) - offset +
2623 (devinfo->gen == 6 ? 16 : 0)) / scale);
2624 break;
2625 case BRW_OPCODE_CONTINUE:
2626 assert(block_end_offset != 0);
2627 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2628 brw_inst_set_uip(devinfo, insn,
2629 (brw_find_loop_end(p, offset) - offset) / scale);
2630
2631 assert(brw_inst_uip(devinfo, insn) != 0);
2632 assert(brw_inst_jip(devinfo, insn) != 0);
2633 break;
2634
2635 case BRW_OPCODE_ENDIF: {
2636 int32_t jump = (block_end_offset == 0) ?
2637 1 * br : (block_end_offset - offset) / scale;
2638 if (devinfo->gen >= 7)
2639 brw_inst_set_jip(devinfo, insn, jump);
2640 else
2641 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2642 break;
2643 }
2644
2645 case BRW_OPCODE_HALT:
2646 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2647 *
2648 * "In case of the halt instruction not inside any conditional
2649 * code block, the value of <JIP> and <UIP> should be the
2650 * same. In case of the halt instruction inside conditional code
2651 * block, the <UIP> should be the end of the program, and the
2652 * <JIP> should be end of the most inner conditional code block."
2653 *
2654 * The uip will have already been set by whoever set up the
2655 * instruction.
2656 */
2657 if (block_end_offset == 0) {
2658 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2659 } else {
2660 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2661 }
2662 assert(brw_inst_uip(devinfo, insn) != 0);
2663 assert(brw_inst_jip(devinfo, insn) != 0);
2664 break;
2665 }
2666 }
2667 }
2668
2669 void brw_ff_sync(struct brw_codegen *p,
2670 struct brw_reg dest,
2671 unsigned msg_reg_nr,
2672 struct brw_reg src0,
2673 bool allocate,
2674 unsigned response_length,
2675 bool eot)
2676 {
2677 const struct gen_device_info *devinfo = p->devinfo;
2678 brw_inst *insn;
2679
2680 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2681
2682 insn = next_insn(p, BRW_OPCODE_SEND);
2683 brw_set_dest(p, insn, dest);
2684 brw_set_src0(p, insn, src0);
2685 brw_set_src1(p, insn, brw_imm_d(0));
2686
2687 if (devinfo->gen < 6)
2688 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2689
2690 brw_set_ff_sync_message(p,
2691 insn,
2692 allocate,
2693 response_length,
2694 eot);
2695 }
2696
2697 /**
2698 * Emit the SEND instruction necessary to generate stream output data on Gen6
2699 * (for transform feedback).
2700 *
2701 * If send_commit_msg is true, this is the last piece of stream output data
2702 * from this thread, so send the data as a committed write. According to the
2703 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2704 *
2705 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2706 * writes are complete by sending the final write as a committed write."
2707 */
2708 void
2709 brw_svb_write(struct brw_codegen *p,
2710 struct brw_reg dest,
2711 unsigned msg_reg_nr,
2712 struct brw_reg src0,
2713 unsigned binding_table_index,
2714 bool send_commit_msg)
2715 {
2716 const struct gen_device_info *devinfo = p->devinfo;
2717 const unsigned target_cache =
2718 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2719 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2720 BRW_SFID_DATAPORT_WRITE);
2721 brw_inst *insn;
2722
2723 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2724
2725 insn = next_insn(p, BRW_OPCODE_SEND);
2726 brw_inst_set_sfid(devinfo, insn, target_cache);
2727 brw_set_dest(p, insn, dest);
2728 brw_set_src0(p, insn, src0);
2729 brw_set_desc(p, insn,
2730 brw_message_desc(devinfo, 1, send_commit_msg, true) |
2731 brw_dp_write_desc(devinfo, binding_table_index,
2732 0, /* msg_control: ignored */
2733 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2734 0, /* last_render_target: ignored */
2735 send_commit_msg)); /* send_commit_msg */
2736 }
2737
2738 static unsigned
2739 brw_surface_payload_size(struct brw_codegen *p,
2740 unsigned num_channels,
2741 unsigned exec_size /**< 0 for SIMD4x2 */)
2742 {
2743 if (exec_size == 0)
2744 return 1; /* SIMD4x2 */
2745 else if (exec_size <= 8)
2746 return num_channels;
2747 else
2748 return 2 * num_channels;
2749 }
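
/* e.g. (illustrative): a SIMD16 message moving num_channels == 4 takes
 * 2 * 4 = 8 GRFs, SIMD8 takes 4, and SIMD4x2 (exec_size == 0) packs the
 * whole vec4 into a single GRF.
 */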
2750
2751 static uint32_t
2752 brw_dp_untyped_atomic_desc(struct brw_codegen *p,
2753 unsigned atomic_op,
2754 bool response_expected)
2755 {
2756 const struct gen_device_info *devinfo = p->devinfo;
2757 unsigned msg_control =
2758 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2759 (response_expected ? 1 << 5 : 0); /* Return data expected */
2760 unsigned msg_type;
2761
2762 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2763 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2764 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2765 msg_control |= 1 << 4; /* SIMD8 mode */
2766
2767 msg_type = HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP;
2768 } else {
2769 msg_type = HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2;
2770 }
2771 } else {
2772 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2773 msg_control |= 1 << 4; /* SIMD8 mode */
2774
2775 msg_type = GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP;
2776 }
2777
2778 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2779 }
2780
2781 void
2782 brw_untyped_atomic(struct brw_codegen *p,
2783 struct brw_reg dst,
2784 struct brw_reg payload,
2785 struct brw_reg surface,
2786 unsigned atomic_op,
2787 unsigned msg_length,
2788 bool response_expected,
2789 bool header_present)
2790 {
2791 const struct gen_device_info *devinfo = p->devinfo;
2792 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2793 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2794 GEN7_SFID_DATAPORT_DATA_CACHE);
2795 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2796 /* SIMD4x2 untyped atomic instructions only exist on HSW+ */
2797 const bool has_simd4x2 = devinfo->gen >= 8 || devinfo->is_haswell;
2798 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) :
2799 has_simd4x2 ? 0 : 8;
2800 const unsigned response_length =
2801 brw_surface_payload_size(p, response_expected, exec_size);
2802 const unsigned desc =
2803 brw_message_desc(devinfo, msg_length, response_length, header_present) |
2804 brw_dp_untyped_atomic_desc(p, atomic_op, response_expected);
2805 /* Mask out unused components -- This is especially important in Align16
2806 * mode on generations that don't have native support for SIMD4x2 atomics,
2807 * because unused but enabled components will cause the dataport to perform
2808 * additional atomic operations on the addresses that happen to be in the
2809 * uninitialized Y, Z and W coordinates of the payload.
2810 */
2811 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2812
2813 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
2814 payload, surface, desc);
2815 }
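
/* Illustrative sketch, not part of the original file (surface index and
 * payload layout are hypothetical): a SIMD8 atomic add returning the
 * pre-operation value, with a two-GRF payload of dword offsets followed
 * by the source operands.
 */
static inline void
example_untyped_atomic_add(struct brw_codegen *p, struct brw_reg dst,
                           struct brw_reg payload)
{
   brw_untyped_atomic(p, dst, payload, brw_imm_ud(0) /* surface */,
                      BRW_AOP_ADD, 2 /* msg_length */,
                      true /* response_expected */,
                      false /* header_present */);
}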
2816
2817 static uint32_t
2818 brw_dp_untyped_atomic_float_desc(struct brw_codegen *p,
2819 unsigned atomic_op,
2820 bool response_expected)
2821 {
2822 const struct gen_device_info *devinfo = p->devinfo;
2823 const unsigned msg_type = GEN9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP;
2824 unsigned msg_control =
2825 atomic_op | /* Atomic Operation Type: BRW_AOP_F* */
2826 (response_expected ? 1 << 5 : 0); /* Return data expected */
2827
2828 assert(devinfo->gen >= 9);
2829 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2830
2831 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2832 msg_control |= 1 << 4; /* SIMD8 mode */
2833
2834 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2835 }
2836
2837 void
2838 brw_untyped_atomic_float(struct brw_codegen *p,
2839 struct brw_reg dst,
2840 struct brw_reg payload,
2841 struct brw_reg surface,
2842 unsigned atomic_op,
2843 unsigned msg_length,
2844 bool response_expected,
2845 bool header_present)
2846 {
2847 const struct gen_device_info *devinfo = p->devinfo;
2848
2849 assert(devinfo->gen >= 9);
2850 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2851
2852 const unsigned sfid = HSW_SFID_DATAPORT_DATA_CACHE_1;
2853 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2854 const unsigned response_length =
2855 brw_surface_payload_size(p, response_expected, exec_size);
2856 const unsigned desc =
2857 brw_message_desc(devinfo, msg_length, response_length, header_present) |
2858 brw_dp_untyped_atomic_float_desc(p, atomic_op, response_expected);
2859
2860 brw_send_indirect_surface_message(p, sfid,
2861 brw_writemask(dst, WRITEMASK_XYZW),
2862 payload, surface, desc);
2863 }
2864
2865 static uint32_t
2866 brw_dp_untyped_surface_read_desc(struct brw_codegen *p,
2867 unsigned num_channels)
2868 {
2869 const struct gen_device_info *devinfo = p->devinfo;
2870 const unsigned msg_type = (devinfo->gen >= 8 || devinfo->is_haswell ?
2871 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2872 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ);
2873 /* Set mask of 32-bit channels to drop. */
2874 unsigned msg_control = 0xf & (0xf << num_channels);
2875
2876 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2877 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2878 msg_control |= 1 << 4; /* SIMD16 mode */
2879 else
2880 msg_control |= 2 << 4; /* SIMD8 mode */
2881 }
2882
2883 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2884 }
2885
2886 void
2887 brw_untyped_surface_read(struct brw_codegen *p,
2888 struct brw_reg dst,
2889 struct brw_reg payload,
2890 struct brw_reg surface,
2891 unsigned msg_length,
2892 unsigned num_channels)
2893 {
2894 const struct gen_device_info *devinfo = p->devinfo;
2895 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2896 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2897 GEN7_SFID_DATAPORT_DATA_CACHE);
2898 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2899 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) : 0;
2900 const unsigned response_length =
2901 brw_surface_payload_size(p, num_channels, exec_size);
2902 const unsigned desc =
2903 brw_message_desc(devinfo, msg_length, response_length, false) |
2904 brw_dp_untyped_surface_read_desc(p, num_channels);
2905
2906 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
2907 }
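
/* Illustrative sketch, not part of the original file (surface index is
 * hypothetical): a SIMD8 read of the first two components; one GRF of
 * offsets goes out and two GRFs of data come back, matching
 * brw_surface_payload_size() above.
 */
static inline void
example_untyped_read_vec2(struct brw_codegen *p, struct brw_reg dst,
                          struct brw_reg payload)
{
   brw_untyped_surface_read(p, dst, payload, brw_imm_ud(0) /* surface */,
                            1 /* msg_length */, 2 /* num_channels */);
}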
2908
2909 static uint32_t
2910 brw_dp_untyped_surface_write_desc(struct brw_codegen *p,
2911 unsigned num_channels)
2912 {
2913 const struct gen_device_info *devinfo = p->devinfo;
2914 const unsigned msg_type = (devinfo->gen >= 8 || devinfo->is_haswell ?
2915 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
2916 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
2917 /* Set mask of 32-bit channels to drop. */
2918 unsigned msg_control = 0xf & (0xf << num_channels);
2919
2920 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2921 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2922 msg_control |= 1 << 4; /* SIMD16 mode */
2923 else
2924 msg_control |= 2 << 4; /* SIMD8 mode */
2925 } else {
2926 if (devinfo->gen >= 8 || devinfo->is_haswell)
2927 msg_control |= 0 << 4; /* SIMD4x2 mode */
2928 else
2929 msg_control |= 2 << 4; /* SIMD8 mode */
2930 }
2931
2932 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2933 }
2934
2935 void
2936 brw_untyped_surface_write(struct brw_codegen *p,
2937 struct brw_reg payload,
2938 struct brw_reg surface,
2939 unsigned msg_length,
2940 unsigned num_channels,
2941 bool header_present)
2942 {
2943 const struct gen_device_info *devinfo = p->devinfo;
2944 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2945 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2946 GEN7_SFID_DATAPORT_DATA_CACHE);
2947 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2948 /* SIMD4x2 untyped surface write instructions only exist on HSW+ */
2949 const bool has_simd4x2 = devinfo->gen >= 8 || devinfo->is_haswell;
2950 const unsigned desc =
2951 brw_message_desc(devinfo, msg_length, 0, header_present) |
2952 brw_dp_untyped_surface_write_desc(p, num_channels);
2953 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2954 const unsigned mask = !has_simd4x2 && !align1 ? WRITEMASK_X : WRITEMASK_XYZW;
2955
2956 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
2957 payload, surface, desc);
2958 }
2959
2960 static unsigned
2961 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
2962 {
2963 switch (bit_size) {
2964 case 8:
2965 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
2966 case 16:
2967 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
2968 case 32:
2969 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
2970 default:
2971 unreachable("Unsupported bit_size for byte scattered messages");
2972 }
2973 }
2974
2975 static uint32_t
2976 brw_dp_byte_scattered_desc(struct brw_codegen *p, unsigned bit_size,
2977 unsigned msg_type)
2978 {
2979 const struct gen_device_info *devinfo = p->devinfo;
2980 unsigned msg_control =
2981 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
2982
2983 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2984 msg_control |= 1; /* SIMD16 mode */
2985 else
2986 msg_control |= 0; /* SIMD8 mode */
2987
2988 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
2989 }
2990
2991 void
2992 brw_byte_scattered_read(struct brw_codegen *p,
2993 struct brw_reg dst,
2994 struct brw_reg payload,
2995 struct brw_reg surface,
2996 unsigned msg_length,
2997 unsigned bit_size)
2998 {
2999 const struct gen_device_info *devinfo = p->devinfo;
3000 assert(devinfo->gen > 7 || devinfo->is_haswell);
3001 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3002 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
3003 const unsigned response_length = brw_surface_payload_size(p, 1, exec_size);
3004 const unsigned desc =
3005 brw_message_desc(devinfo, msg_length, response_length, false) |
3006 brw_dp_byte_scattered_desc(p, bit_size,
3007 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
3008
3009 brw_send_indirect_surface_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
3010 dst, payload, surface, desc);
3011 }
3012
3013 void
3014 brw_byte_scattered_write(struct brw_codegen *p,
3015 struct brw_reg payload,
3016 struct brw_reg surface,
3017 unsigned msg_length,
3018 unsigned bit_size,
3019 bool header_present)
3020 {
3021 const struct gen_device_info *devinfo = p->devinfo;
3022 assert(devinfo->gen > 7 || devinfo->is_haswell);
3023 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3024 const unsigned desc =
3025 brw_message_desc(devinfo, msg_length, 0, header_present) |
3026 brw_dp_byte_scattered_desc(p, bit_size,
3027 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
3028
3029 brw_send_indirect_surface_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
3030 brw_writemask(brw_null_reg(),
3031 WRITEMASK_XYZW),
3032 payload, surface, desc);
3033 }
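
/* Illustrative sketch, not part of the original file (surface index and
 * payload layout are hypothetical): a SIMD8 16-bit store on HSW+ in
 * Align1 mode, with one GRF of byte offsets followed by one GRF holding
 * a word in the low half of each dword channel.
 */
static inline void
example_store_word(struct brw_codegen *p, struct brw_reg payload)
{
   brw_byte_scattered_write(p, payload, brw_imm_ud(0) /* surface */,
                            2 /* msg_length */, 16 /* bit_size */,
                            false /* header_present */);
}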
3034
3035 static uint32_t
3036 brw_dp_typed_atomic_desc(struct brw_codegen *p,
3037 unsigned atomic_op,
3038 bool response_expected)
3039 {
3040 const struct gen_device_info *devinfo = p->devinfo;
3041 unsigned msg_control =
3042 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3043 (response_expected ? 1 << 5 : 0); /* Return data expected */
3044 unsigned msg_type;
3045
3046 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3047 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3048 if ((brw_get_default_group(p) / 8) % 2 == 1)
3049 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3050
3051 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP;
3052 } else {
3053 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2;
3054 }
3055
3056 } else {
3057 if ((brw_get_default_group(p) / 8) % 2 == 1)
3058 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3059
3060 msg_type = GEN7_DATAPORT_RC_TYPED_ATOMIC_OP;
3061 }
3062
3063 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3064 }
3065
3066 void
3067 brw_typed_atomic(struct brw_codegen *p,
3068 struct brw_reg dst,
3069 struct brw_reg payload,
3070 struct brw_reg surface,
3071 unsigned atomic_op,
3072 unsigned msg_length,
3073 bool response_expected,
3074 bool header_present) {
3075 const struct gen_device_info *devinfo = p->devinfo;
3076 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3077 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3078 GEN6_SFID_DATAPORT_RENDER_CACHE);
3079 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3080 /* SIMD4x2 typed atomic instructions only exist on HSW+ */
3081 const bool has_simd4x2 = devinfo->gen >= 8 || devinfo->is_haswell;
3082 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) :
3083 has_simd4x2 ? 0 : 8;
3084 /* Typed atomics don't support SIMD16 */
3085 assert(exec_size <= 8);
3086 const unsigned response_length =
3087 brw_surface_payload_size(p, response_expected, exec_size);
3088 const unsigned desc =
3089 brw_message_desc(devinfo, msg_length, response_length, header_present) |
3090 brw_dp_typed_atomic_desc(p, atomic_op, response_expected);
3091 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3092 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3093
3094 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
3095 payload, surface, desc);
3096 }
3097
3098 static uint32_t
3099 brw_dp_typed_surface_read_desc(struct brw_codegen *p,
3100 unsigned num_channels)
3101 {
3102 const struct gen_device_info *devinfo = p->devinfo;
3103 /* Set mask of unused channels. */
3104 unsigned msg_control = 0xf & (0xf << num_channels);
3105 unsigned msg_type;
3106
3107 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3108 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3109 if ((brw_get_default_group(p) / 8) % 2 == 1)
3110 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3111 else
3112 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3113 }
3114
3115 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ;
3116 } else {
3117 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3118 if ((brw_get_default_group(p) / 8) % 2 == 1)
3119 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3120 }
3121
3122 msg_type = GEN7_DATAPORT_RC_TYPED_SURFACE_READ;
3123 }
3124
3125 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3126 }
3127
3128 void
3129 brw_typed_surface_read(struct brw_codegen *p,
3130 struct brw_reg dst,
3131 struct brw_reg payload,
3132 struct brw_reg surface,
3133 unsigned msg_length,
3134 unsigned num_channels,
3135 bool header_present)
3136 {
3137 const struct gen_device_info *devinfo = p->devinfo;
3138 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3139 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3140 GEN6_SFID_DATAPORT_RENDER_CACHE);
3141 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3142 /* SIMD4x2 typed read instructions only exist on HSW+ */
3143 const bool has_simd4x2 = devinfo->gen >= 8 || devinfo->is_haswell;
3144 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) :
3145 has_simd4x2 ? 0 : 8;
3146 /* Typed surface reads don't support SIMD16 */
3147 assert(exec_size <= 8);
3148 const unsigned response_length =
3149 brw_surface_payload_size(p, num_channels, exec_size);
3150 const unsigned desc =
3151 brw_message_desc(devinfo, msg_length, response_length, header_present) |
3152 brw_dp_typed_surface_read_desc(p, num_channels);
3153
3154 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
3155 }
3156
3157 static uint32_t
3158 brw_dp_typed_surface_write_desc(struct brw_codegen *p,
3159 unsigned num_channels)
3160 {
3161 const struct gen_device_info *devinfo = p->devinfo;
3162 /* Set mask of unused channels. */
3163 unsigned msg_control = 0xf & (0xf << num_channels);
3164 unsigned msg_type;
3165
3166 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3167 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3168 if ((brw_get_default_group(p) / 8) % 2 == 1)
3169 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3170 else
3171 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3172 }
3173
3174 msg_type = HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE;
3175
3176 } else {
3177 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3178 if ((brw_get_default_group(p) / 8) % 2 == 1)
3179 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3180 }
3181
3182 msg_type = GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE;
3183 }
3184
3185 return brw_dp_surface_desc(devinfo, msg_type, msg_control);
3186 }
3187
3188 void
3189 brw_typed_surface_write(struct brw_codegen *p,
3190 struct brw_reg payload,
3191 struct brw_reg surface,
3192 unsigned msg_length,
3193 unsigned num_channels,
3194 bool header_present)
3195 {
3196 const struct gen_device_info *devinfo = p->devinfo;
3197 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3198 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3199 GEN6_SFID_DATAPORT_RENDER_CACHE);
3200 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3201    /* SIMD4x2 typed surface write instructions only exist on HSW+ */
3202 const bool has_simd4x2 = devinfo->gen >= 8 || devinfo->is_haswell;
3203 const unsigned desc =
3204 brw_message_desc(devinfo, msg_length, 0, header_present) |
3205 brw_dp_typed_surface_write_desc(p, num_channels);
3206 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3207 const unsigned mask = !has_simd4x2 && !align1 ? WRITEMASK_X : WRITEMASK_XYZW;
3208
3209 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
3210 payload, surface, desc);
3211 }
3212
3213 static void
3214 brw_set_memory_fence_message(struct brw_codegen *p,
3215 struct brw_inst *insn,
3216 enum brw_message_target sfid,
3217 bool commit_enable)
3218 {
3219 const struct gen_device_info *devinfo = p->devinfo;
3220
3221 brw_set_desc(p, insn, brw_message_desc(
3222 devinfo, 1, (commit_enable ? 1 : 0), true));
3223
3224 brw_inst_set_sfid(devinfo, insn, sfid);
3225
3226 switch (sfid) {
3227 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3228 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3229 break;
3230 case GEN7_SFID_DATAPORT_DATA_CACHE:
3231 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3232 break;
3233 default:
3234 unreachable("Not reached");
3235 }
3236
3237 if (commit_enable)
3238 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3239 }

void
brw_memory_fence(struct brw_codegen *p,
                 struct brw_reg dst,
                 enum opcode send_op)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool commit_enable =
      devinfo->gen >= 10 || /* HSD ES # 1404612949 */
      (devinfo->gen == 7 && !devinfo->is_haswell);
   struct brw_inst *insn;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, BRW_EXECUTE_1);
   dst = vec1(dst);

   /* Set dst as destination for dependency tracking; the MEMORY_FENCE
    * message doesn't write anything back.
    */
   insn = next_insn(p, send_op);
   dst = retype(dst, BRW_REGISTER_TYPE_UW);
   brw_set_dest(p, insn, dst);
   brw_set_src0(p, insn, dst);
   brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
                                commit_enable);

   if (devinfo->gen == 7 && !devinfo->is_haswell) {
      /* IVB does typed surface access through the render cache, so we need
       * to flush it too.  Use a different register so both flushes can be
       * pipelined by the hardware.
       */
      insn = next_insn(p, send_op);
      brw_set_dest(p, insn, offset(dst, 1));
      brw_set_src0(p, insn, offset(dst, 1));
      brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
                                   commit_enable);

      /* Now write the response of the second message into the response of
       * the first to trigger a pipeline stall -- This way future render and
       * data cache messages will be properly ordered with respect to past
       * data and render cache messages.
       */
      brw_MOV(p, dst, offset(dst, 1));
   }

   brw_pop_insn_state(p);
}
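
/* Illustrative expansion (approximate assembly, for exposition only): on
 * IVB the helper above emits a sequence along the lines of
 *
 *    send  g<n>   g<n>   dp_data_cache   MEMORY_FENCE, commit
 *    send  g<n+1> g<n+1> dp_render_cache MEMORY_FENCE, commit
 *    mov   g<n>   g<n+1>
 *
 * where the final MOV makes the data-cache fence's destination depend on
 * the render-cache fence's commit write-back, serializing both flushes
 * against later memory traffic.
 */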

void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const uint16_t exec_size = brw_get_default_exec_size(p);
   const unsigned slot_group = brw_get_default_group(p) / 16;
   const unsigned simd_mode = (exec_size == BRW_EXECUTE_16);
   const unsigned desc =
      brw_message_desc(devinfo, msg_length, response_length, false) |
      brw_pixel_interp_desc(devinfo, mode, noperspective, simd_mode,
                            slot_group);

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually immediate.
    */
   brw_send_indirect_message(p,
                             GEN7_SFID_PIXEL_INTERPOLATOR,
                             dest,
                             mrf,
                             vec1(data),
                             desc);
}
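
/* Usage sketch (hypothetical operands, illustration only): a SIMD8
 * centroid query sending one payload register from m2 and returning two
 * registers of interpolated values into g4 might look like
 *
 *    brw_pixel_interpolator_query(p, brw_vec8_grf(4, 0),
 *                                 brw_message_reg(2), false,
 *                                 GEN7_PIXEL_INTERPOLATOR_LOC_CENTROID,
 *                                 brw_imm_ud(0), 1, 2);
 *
 * with noperspective false, immediate data 0, msg_length 1 and
 * response_length 2.
 */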

void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_get_default_exec_size(p);
   const unsigned qtr_control = brw_get_default_group(p) / 8;
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   /* The flag register is only used on Gen7 in align1 mode, so avoid setting
    * unnecessary bits in the instruction words, get the information we need
    * and reset the default flag register. This allows more instructions to
    * be compacted.
    */
   const unsigned flag_subreg = p->current->flag_subreg;
   brw_set_default_flag_reg(p, 0, 0);

   if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just find
          * the first bit set in the execution mask.  The register exists on
          * HSW already but it reads back as all ones when the current
          * instruction has execution masking disabled, so it's kind of
          * useless.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's not
             * tightly packed (i.e. it doesn't have the form '2^n - 1' for
             * some n).  Combine ce0 with the given dispatch (or vector) mask
             * to mask off those channels which were never dispatched by the
             * hardware.
             */
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }

         /* Quarter control has the effect of magically shifting the value of
          * ce0 so you'll get the first active channel relative to the
          * specified quarter control as result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
      } else {
         const struct brw_reg flag = brw_flag_subreg(flag_subreg);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking and
          * a conditional modifier enabled in order to get the full execution
          * mask in f1.0.  We could use a single 32-wide move here if it
          * weren't for the hardware bug that causes channel enables to be
          * applied incorrectly to the second half of 32-wide instructions on
          * Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
            brw_inst_set_flag_reg_nr(devinfo, inst, flag_subreg / 2);
            brw_inst_set_flag_subreg_nr(devinfo, inst, flag_subreg % 2);
         }

         /* Find the first bit set in the exec_size-wide portion of the flag
          * register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.  Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination without and with execution masking to
          * find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}
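
/* Reference semantics (plain-C sketch, illustration only): FBL returns the
 * bit position of the least significant set bit of its source (all ones if
 * the source is zero), so the Gen8+ align1 path above behaves roughly like
 *
 *    static unsigned
 *    find_live_channel_ref(uint32_t ce0, uint32_t dispatch_mask,
 *                          unsigned qtr_control)
 *    {
 *       const uint32_t exec = ce0 & (dispatch_mask >> (qtr_control * 8));
 *       return exec ? __builtin_ctz(exec) : ~0u;
 *    }
 *
 * evaluated by a single scalar (exec-size 1) instruction on the EU.
 */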

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       * "The lower bits of the AddressImmediate must not overflow to
       * change the register address.  The lower 5 bits of Address
       * Immediate when added to lower 5 bits of address register gives
       * the sub-register offset. The upper bits of Address Immediate
       * when added to upper bits of address register gives the register
       * address. Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, account for the difference if the source
          * register is above this limit.
          */
         if (offset >= limit) {
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             * "When source or destination datatype is 64b or operation is
             * integer DWord multiply, indirect addressing must not be
             * used."
             *
             * To work around this restriction, we do two integer MOVs
             * instead of one 64-bit MOV.  Because no double value should
             * ever cross a register boundary, it's safe to use the
             * immediate offset in the indirect here to handle adding 4
             * bytes to the offset and avoid the extra ADD to the register
             * file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_vec1_indirect(addr.subnr, offset),
                           BRW_REGISTER_TYPE_D));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_vec1_indirect(addr.subnr, offset + 4),
                           BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can be either zero or one, replicate it
          * to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}
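
/* Worked example (illustrative arithmetic only): broadcasting channel idx
 * of a packed (hstride 1) float vector starting at g20, i.e. byte offset
 * 20 * 32 = 640.  Since 640 >= 512, the code above moves the part of the
 * offset that won't fit in the indirect addressing immediate into a0.0:
 *
 *    shl(1) a0.0  idx  2           // a0.0 = idx * 4 (component size)
 *    add(1) a0.0  a0.0 512         // 512 = 640 - 640 % 512
 *    mov(1) dst   g[a0.0 + 128]    // 128 = 640 % 512
 */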

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we want to just
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_desc(p, send, (brw_message_desc(devinfo, 2, 0, false) |
                          brw_dp_untyped_atomic_desc(p, BRW_AOP_ADD, false)));

   brw_inst_set_sfid(devinfo, send, sfid);
   brw_inst_set_binding_table_index(devinfo, send, surf_index);

   brw_pop_insn_state(p);
}


/**
 * Emit the SEND message for a barrier
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());
   brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}

/**
 * Changes the floating point rounding mode by updating the control register
 * rounding mode field, cr0.0 bits 5:4 (BRW_CR0_RND_MODE_MASK).  This
 * function supports changing to RTNE (00), RU (01), RD (10) and RTZ (11)
 * rounding using bitwise operations.  Only RTNE and RTZ rounding are
 * enabled at the NIR level.
 */
void
brw_rounding_mode(struct brw_codegen *p,
                  enum brw_rnd_mode mode)
{
   const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;

   if (bits != BRW_CR0_RND_MODE_MASK) {
      brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                               brw_imm_ud(~BRW_CR0_RND_MODE_MASK));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);

      /* From the Skylake PRM, Volume 7, page 760:
       * "Implementation Restriction on Register Access: When the control
       * register is used as an explicit source and/or destination, hardware
       * does not ensure execution pipeline coherency. Software must set the
       * thread control field to 'switch' for an instruction that uses
       * control register as an explicit operand."
       */
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }

   if (bits) {
      brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                              brw_imm_ud(bits));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }
}
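
/* Worked example (illustrative arithmetic only, assuming the rounding mode
 * field occupies cr0.0 bits 5:4): switching to round-up, mode RU = 01,
 * gives bits = 0x10, so the thread emits
 *
 *    and(1) cr0.0 cr0.0 0xffffffcf   { switch }   // clear bits 5:4
 *    or(1)  cr0.0 cr0.0 0x00000010   { switch }   // set RU
 *
 * For RTZ (11) the AND is skipped since OR'ing in 0x30 alone suffices, and
 * for RTNE (00) the OR is skipped since clearing the field already selects
 * it.
 */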