intel/compiler: Handle bits 15:12 in brw_send_indirect_split_message()
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
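
/* A minimal usage sketch (illustrative only; nothing in this file calls it).
 * The register constructors such as brw_vec8_grf() come from brw_reg.h.
 */
static inline void
example_resolve_send_payload(struct brw_codegen *p)
{
   struct brw_reg payload = brw_vec8_grf(2, 0);   /* g2 holds the payload */
   gen6_resolve_implied_move(p, &payload, 1);     /* moves g2 to m1 on Gen6+ */
   /* `payload` now names m1 and can be used as the <src> of a SEND. */
   (void) payload;
}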

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
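
/* Worked example: assuming GEN7_MRF_HACK_START is 112 (see brw_eu_defines.h
 * for the authoritative value), a write to m4 on Gen7+ becomes a write to
 * g116, which lies inside the R112-R127 window quoted above.
 */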

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file == BRW_GENERAL_REGISTER_FILE)
      assert(dest.nr < 128);

   /* The hardware requires a destination stride of 2 when the destination
    * type is Byte (except for packed-byte MOV).  This seems to be required
    * even if the destination is the NULL register.
    */
   if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
       dest.nr == BRW_ARF_NULL &&
       type_sz(dest.type) == 1) {
      dest.hstride = BRW_HORIZONTAL_STRIDE_2;
   }

   gen7_convert_mrf_to_grf(p, &dest);

   if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
       brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(dest.address_mode == BRW_ADDRESS_DIRECT);
      assert(dest.subnr % 16 == 0);
      assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1 &&
             dest.vstride == dest.width + 1);
      assert(!dest.negate && !dest.abs);
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_send_dst_reg_file(devinfo, inst, dest.file);
   } else {
      brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
      brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

      if (dest.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
            if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
               dest.hstride = BRW_HORIZONTAL_STRIDE_1;
            brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
         } else {
            brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
            brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
            if (dest.file == BRW_GENERAL_REGISTER_FILE ||
                dest.file == BRW_MESSAGE_REGISTER_FILE) {
               assert(dest.writemask != 0);
            }
            /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
             * Although Dst.HorzStride is a don't care for Align16, HW needs
             * this to be programmed as "01".
             */
            brw_inst_set_dst_hstride(devinfo, inst, 1);
         }
      } else {
         brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

         /* These are different sizes in align1 vs align16:
          */
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                          dest.indirect_offset);
            if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
               dest.hstride = BRW_HORIZONTAL_STRIDE_1;
            brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
         } else {
            brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                           dest.indirect_offset);
            /* Even though it's ignored in DA16 mode, the hardware still
             * requires this to be set to '01'.
             */
            brw_inst_set_dst_hstride(devinfo, inst, 1);
         }
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * On platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16.  In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file == BRW_GENERAL_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 &&
       (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
        brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
       brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC) {
      assert(reg.file == BRW_GENERAL_REGISTER_FILE);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      assert(reg.subnr % 16 == 0);
      assert(reg.hstride == BRW_HORIZONTAL_STRIDE_1 &&
             reg.vstride == reg.width + 1);
      assert(!reg.negate && !reg.abs);
      brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
      brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
   } else {
      brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
      brw_inst_set_src0_abs(devinfo, inst, reg.abs);
      brw_inst_set_src0_negate(devinfo, inst, reg.negate);
      brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

      if (reg.file == BRW_IMMEDIATE_VALUE) {
         if (reg.type == BRW_REGISTER_TYPE_DF ||
             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
            brw_inst_set_imm_df(devinfo, inst, reg.df);
         else if (reg.type == BRW_REGISTER_TYPE_UQ ||
                  reg.type == BRW_REGISTER_TYPE_Q)
            brw_inst_set_imm_uq(devinfo, inst, reg.u64);
         else
            brw_inst_set_imm_ud(devinfo, inst, reg.ud);

         if (type_sz(reg.type) < 8) {
            brw_inst_set_src1_reg_file(devinfo, inst,
                                       BRW_ARCHITECTURE_REGISTER_FILE);
            brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                          brw_inst_src0_reg_hw_type(devinfo, inst));
         }
      } else {
         if (reg.address_mode == BRW_ADDRESS_DIRECT) {
            brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
            if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
               brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
            } else {
               brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
            }
         } else {
            brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

            if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
               brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
            } else {
               brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
            }
         }

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            if (reg.width == BRW_WIDTH_1 &&
                brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
               brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
               brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
               brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
            } else {
               brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
               brw_inst_set_src0_width(devinfo, inst, reg.width);
               brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
            }
         } else {
            brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
            brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
            brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
            brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

            if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
               /* This is an oddity of using the same register descriptions
                * in align16 mode as in align1:
                */
               brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                       reg.type == BRW_REGISTER_TYPE_DF &&
                       reg.vstride == BRW_VERTICAL_STRIDE_2) {
               /* From SNB PRM:
                *
                *    "For Align16 access mode, only encodings of 0000 and 0011
                *     are allowed. Other codes are reserved."
                *
                * Presumably the DevSNB behavior applies to IVB as well.
                */
               brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else {
               brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
            }
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_GENERAL_REGISTER_FILE)
      assert(reg.nr < 128);

   if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDS ||
       brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDSC) {
      assert(reg.file == BRW_GENERAL_REGISTER_FILE ||
             reg.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      assert(reg.subnr == 0);
      assert(reg.hstride == BRW_HORIZONTAL_STRIDE_1 &&
             reg.vstride == reg.width + 1);
      assert(!reg.negate && !reg.abs);
      brw_inst_set_send_src1_reg_nr(devinfo, inst, reg.nr);
      brw_inst_set_send_src1_reg_file(devinfo, inst, reg.file);
   } else {
      /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
       *
       *    "Accumulator registers may be accessed explicitly as src0
       *     operands only."
       */
      assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
             reg.nr != BRW_ARF_ACCUMULATOR);

      gen7_convert_mrf_to_grf(p, &reg);
      assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

      brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
      brw_inst_set_src1_abs(devinfo, inst, reg.abs);
      brw_inst_set_src1_negate(devinfo, inst, reg.negate);

      /* Only src1 can be an immediate in two-argument instructions.
       */
      assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

      if (reg.file == BRW_IMMEDIATE_VALUE) {
         /* two-argument instructions can only use 32-bit immediates */
         assert(type_sz(reg.type) < 8);
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);
      } else {
         /* This is a hardware restriction, which may or may not be lifted
          * in the future:
          */
         assert (reg.address_mode == BRW_ADDRESS_DIRECT);
         /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

         brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            if (reg.width == BRW_WIDTH_1 &&
                brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
               brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
               brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
               brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
            } else {
               brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
               brw_inst_set_src1_width(devinfo, inst, reg.width);
               brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
            }
         } else {
            brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
            brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
            brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
            brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                          BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

            if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
               /* This is an oddity of using the same register descriptions
                * in align16 mode as in align1:
                */
               brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                       reg.type == BRW_REGISTER_TYPE_DF &&
                       reg.vstride == BRW_VERTICAL_STRIDE_2) {
               /* From SNB PRM:
                *
                *    "For Align16 access mode, only encodings of 0000 and 0011
                *     are allowed. Other codes are reserved."
                *
                * Presumably the DevSNB behavior applies to IVB as well.
                */
               brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
            } else {
               brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
            }
         }
      }
   }
}

/**
 * Specify the descriptor and extended descriptor immediate for a SEND(C)
 * message instruction.
 */
void
brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
                unsigned desc, unsigned ex_desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC);
   brw_inst_set_src1_file_type(devinfo, inst,
                               BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_UD);
   brw_inst_set_send_desc(devinfo, inst, desc);
   if (devinfo->gen >= 9)
      brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
}
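
/* Usage sketch: most senders in this file reach this through the descriptor
 * helpers in brw_eu.h, e.g.
 *
 *    brw_set_desc(p, insn, brw_message_desc(devinfo, mlen, rlen, true));
 *
 * where brw_set_desc() is a thin wrapper that passes ex_desc = 0, and
 * brw_message_desc() packs the message length, response length, and
 * header-present bit into the immediate descriptor.
 */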

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}
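
/* Worked example of the inference above: INT_DIV_QUOTIENT_AND_REMAINDER
 * sends two operands (msg_length = 2) and returns two results
 * (response_length = 2), while a single-operand, single-result function
 * such as BRW_MATH_FUNCTION_INV uses one register each way.
 */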

static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, mlen, rlen, header_present));

   brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
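
/* Worked example of the block-size encoding above: on Gen7, num_regs = 4 is
 * encoded as num_regs - 1 = 3; on Gen8+, num_regs = 8 is encoded as
 * _mesa_logbase2(8) = 3.  Same field, two interpretations.
 */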

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}
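
/* Caller beware: the reralloc() above may move p->store, so raw brw_inst
 * pointers do not survive a later brw_next_insn() call.  That is why the
 * control-flow helpers below record instruction indices (inst - p->store)
 * rather than pointers.  A sketch of the hazard:
 *
 *    brw_inst *insn = brw_next_insn(p, BRW_OPCODE_MOV);
 *    brw_NOP(p);              // may grow and move the store
 *    // `insn` may now dangle; re-derive it from its index instead.
 */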

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
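
/* Example of the unit conversion: a source at byte offset 16 within a
 * register (its fifth 32-bit component) is encoded for a 3-src instruction
 * as 16 / 4 = 4.
 */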

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.file == BRW_IMMEDIATE_VALUE || src0.nr < 128);
   assert(src1.file != BRW_IMMEDIATE_VALUE && src1.nr < 128);
   assert(src2.file == BRW_IMMEDIATE_VALUE || src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD ||
             (dest.type == BRW_REGISTER_TYPE_HF && devinfo->gen >= 8));
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types.  The MAD and LRP emitters ensure
          * that all four types are float.  The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);

         /* From the Bspec, 3D Media GPGPU, Instruction fields, srcType:
          *
          *    "Three source instructions can use operands with mixed-mode
          *     precision. When SrcType field is set to :f or :hf it defines
          *     precision for source 0 only, and fields Src1Type and Src2Type
          *     define precision for other source operands:
          *
          *     0b = :f. Single precision Float (32-bit).
          *     1b = :hf. Half precision Float (16-bit)."
          */
         if (src1.type == BRW_REGISTER_TYPE_HF)
            brw_inst_set_3src_a16_src1_type(devinfo, inst, 1);

         if (src2.type == BRW_REGISTER_TYPE_HF)
            brw_inst_set_3src_a16_src2_type(devinfo, inst, 1);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                          \
brw_inst *brw_##OP(struct brw_codegen *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0)                   \
{                                                         \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);       \
}

#define ALU2(OP)                                          \
brw_inst *brw_##OP(struct brw_codegen *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0,                   \
                   struct brw_reg src1)                   \
{                                                         \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1); \
}

#define ALU3(OP)                                                  \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   if (p->current->access_mode == BRW_ALIGN_16) {                 \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src0.swizzle = BRW_SWIZZLE_XXXX;                         \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src1.swizzle = BRW_SWIZZLE_XXXX;                         \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src2.swizzle = BRW_SWIZZLE_XXXX;                         \
   }                                                              \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

#define ALU3F(OP)                                                 \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                     \
          dest.type == BRW_REGISTER_TYPE_DF);                     \
   if (dest.type == BRW_REGISTER_TYPE_F) {                        \
      assert(src0.type == BRW_REGISTER_TYPE_F);                   \
      assert(src1.type == BRW_REGISTER_TYPE_F);                   \
      assert(src2.type == BRW_REGISTER_TYPE_F);                   \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {                \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                  \
   }                                                              \
                                                                  \
   if (p->current->access_mode == BRW_ALIGN_16) {                 \
      if (src0.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src0.swizzle = BRW_SWIZZLE_XXXX;                         \
      if (src1.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src1.swizzle = BRW_SWIZZLE_XXXX;                         \
      if (src2.vstride == BRW_VERTICAL_STRIDE_0)                  \
         src2.swizzle = BRW_SWIZZLE_XXXX;                         \
   }                                                              \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                     \
void brw_##OP(struct brw_codegen *p,                                  \
              struct brw_reg dest,                                    \
              struct brw_reg src)                                     \
{                                                                     \
   const struct gen_device_info *devinfo = p->devinfo;                \
   brw_inst *rnd, *add;                                               \
   rnd = next_insn(p, BRW_OPCODE_##OP);                               \
   brw_set_dest(p, rnd, dest);                                        \
   brw_set_src0(p, rnd, src);                                         \
                                                                      \
   if (devinfo->gen < 6) {                                            \
      /* turn on round-increments */                                  \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R);    \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));                  \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL);  \
   }                                                                  \
}
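
/* Sketch of the pre-Gen6 sequence this macro expands to, e.g. for brw_RNDZ
 * (pseudo-assembly, illustrative only):
 *
 *    rndz.r   dst, src            // round; set per-channel "increment" flag
 *    (+f0)    add dst, dst, 1.0F  // predicated fix-up of mis-rounded channels
 *
 * On Sandybridge and later, only the first instruction is emitted.
 */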


ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU2(ROL)
ALU2(ROR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use an <X,2,0> source region to
    * read each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == src0.width + src0.hstride);
      src0.vstride = src0.hstride;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
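
/* Region example for the workaround above: an <8;8,1>:f source (vstride 8,
 * width 8, hstride 1) becomes <1;2,0>:f, so each element is read twice and
 * the odd channels that IVB/BYT ignore only ever carry duplicate data.
 */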

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *    Because this instruction does not have a 16-bit floating-point
       *    type, the source data type must be Word (W). The destination type
       *    must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}


/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* The EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, e.g. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}
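
/* Usage sketch (illustrative only; nothing in this file calls it): a
 * generator typically emits a compare to set the flag register and then
 * brackets the conditional code with brw_IF()/brw_ELSE()/brw_ENDIF(); the
 * jump offsets are filled in later by patch_IF_ELSE().
 */
static inline void
example_emit_if_else(struct brw_codegen *p, struct brw_reg dst,
                     struct brw_reg a, struct brw_reg b)
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, a, b);
   brw_IF(p, BRW_EXECUTE_8);
   brw_MOV(p, dst, a);        /* then-block: channels where a < b */
   brw_ELSE(p);
   brw_MOV(p, dst, b);        /* else-block: the remaining channels */
   brw_ENDIF(p);
}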

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}
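
/* Worked example of the offsets above (instructions are 16 bytes): with the
 * IF at slot 10, the ELSE at slot 14, and the next instruction at slot 18,
 * the converted IF adds (14 - 10 + 1) * 16 = 80 bytes to IP, landing just
 * past the ELSE, and the converted ELSE adds (18 - 14) * 16 = 64 bytes.
 */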

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}
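
/* Worked example: brw_jump_scale() gives the per-instruction jump unit (see
 * brw_eu.h for the authoritative values; roughly, jumps are counted in
 * 64-bit compacted-instruction chunks on Gen5-7 and in bytes on Gen8+).
 * Assuming a scale of 2 on Gen7, an IF at slot 4 with its ENDIF at slot 9
 * gets JIP = UIP = 2 * (9 - 4) = 10.
 */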

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}
1540
1541 void
1542 brw_ENDIF(struct brw_codegen *p)
1543 {
1544 const struct gen_device_info *devinfo = p->devinfo;
1545 brw_inst *insn = NULL;
1546 brw_inst *else_inst = NULL;
1547 brw_inst *if_inst = NULL;
1548 brw_inst *tmp;
1549 bool emit_endif = true;
1550
1551 /* In single program flow mode, we can express IF and ELSE instructions
1552 * equivalently as ADD instructions that operate on IP. On platforms prior
1553 * to Gen6, flow control instructions cause an implied thread switch, so
1554 * this is a significant savings.
1555 *
1556 * However, on Gen6, writing to IP doesn't work in single program flow mode
1557 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1558 * not be updated by non-flow control instructions."). And on later
1559 * platforms, there is no significant benefit to converting control flow
1560 * instructions to conditional ADDs. So we only do this trick on Gen4 and
1561 * Gen5.
1562 */
1563 if (devinfo->gen < 6 && p->single_program_flow)
1564 emit_endif = false;
1565
1566 /*
1567 * A single next_insn() may reallocate the instruction store and change
1568 * its base address (p->store), so call it before deriving any pointer
1569 * into the store from an index.
1570 */
1571 if (emit_endif)
1572 insn = next_insn(p, BRW_OPCODE_ENDIF);
1573
1574 /* Pop the IF and (optional) ELSE instructions from the stack */
1575 p->if_depth_in_loop[p->loop_stack_depth]--;
1576 tmp = pop_if_stack(p);
1577 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1578 else_inst = tmp;
1579 tmp = pop_if_stack(p);
1580 }
1581 if_inst = tmp;
1582
1583 if (!emit_endif) {
1584 /* ENDIF is useless; don't bother emitting it. */
1585 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1586 return;
1587 }
1588
1589 if (devinfo->gen < 6) {
1590 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1591 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1592 brw_set_src1(p, insn, brw_imm_d(0x0));
1593 } else if (devinfo->gen == 6) {
1594 brw_set_dest(p, insn, brw_imm_w(0));
1595 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1596 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1597 } else if (devinfo->gen == 7) {
1598 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1599 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1600 brw_set_src1(p, insn, brw_imm_w(0));
1601 } else {
1602 brw_set_src0(p, insn, brw_imm_d(0));
1603 }
1604
1605 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1606 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1607 if (devinfo->gen < 6)
1608 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1609
1610 /* Also pop item off the stack in the endif instruction: */
1611 if (devinfo->gen < 6) {
1612 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1613 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1614 } else if (devinfo->gen == 6) {
1615 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1616 } else {
1617 brw_inst_set_jip(devinfo, insn, 2);
1618 }
1619 patch_IF_ELSE(p, if_inst, else_inst, insn);
1620 }
1621
1622 brw_inst *
1623 brw_BREAK(struct brw_codegen *p)
1624 {
1625 const struct gen_device_info *devinfo = p->devinfo;
1626 brw_inst *insn;
1627
1628 insn = next_insn(p, BRW_OPCODE_BREAK);
1629 if (devinfo->gen >= 8) {
1630 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1631 brw_set_src0(p, insn, brw_imm_d(0x0));
1632 } else if (devinfo->gen >= 6) {
1633 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1634 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1635 brw_set_src1(p, insn, brw_imm_d(0x0));
1636 } else {
1637 brw_set_dest(p, insn, brw_ip_reg());
1638 brw_set_src0(p, insn, brw_ip_reg());
1639 brw_set_src1(p, insn, brw_imm_d(0x0));
1640 brw_inst_set_gen4_pop_count(devinfo, insn,
1641 p->if_depth_in_loop[p->loop_stack_depth]);
1642 }
1643 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1644 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1645
1646 return insn;
1647 }
1648
1649 brw_inst *
1650 brw_CONT(struct brw_codegen *p)
1651 {
1652 const struct gen_device_info *devinfo = p->devinfo;
1653 brw_inst *insn;
1654
1655 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1656 brw_set_dest(p, insn, brw_ip_reg());
1657 if (devinfo->gen >= 8) {
1658 brw_set_src0(p, insn, brw_imm_d(0x0));
1659 } else {
1660 brw_set_src0(p, insn, brw_ip_reg());
1661 brw_set_src1(p, insn, brw_imm_d(0x0));
1662 }
1663
1664 if (devinfo->gen < 6) {
1665 brw_inst_set_gen4_pop_count(devinfo, insn,
1666 p->if_depth_in_loop[p->loop_stack_depth]);
1667 }
1668 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1669 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1670 return insn;
1671 }
1672
1673 brw_inst *
1674 gen6_HALT(struct brw_codegen *p)
1675 {
1676 const struct gen_device_info *devinfo = p->devinfo;
1677 brw_inst *insn;
1678
1679 insn = next_insn(p, BRW_OPCODE_HALT);
1680 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1681 if (devinfo->gen >= 8) {
1682 brw_set_src0(p, insn, brw_imm_d(0x0));
1683 } else {
1684 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1685 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1686 }
1687
1688 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1689 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1690 return insn;
1691 }
1692
1693 /* DO/WHILE loop:
1694 *
1695 * The DO/WHILE is just an unterminated loop -- break or continue are
1696 * used for control within the loop. We have a few ways they can be
1697 * done.
1698 *
1699 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1700 * jip and no DO instruction.
1701 *
1702 * For non-uniform control flow pre-gen6, there's a DO instruction to
1703 * push the mask, and a WHILE to jump back, and BREAK to get out and
1704 * pop the mask.
1705 *
1706 * For gen6, there's no more mask stack, so no need for DO. WHILE
1707 * just points back to the first instruction of the loop.
1708 */
1709 brw_inst *
1710 brw_DO(struct brw_codegen *p, unsigned execute_size)
1711 {
1712 const struct gen_device_info *devinfo = p->devinfo;
1713
1714 if (devinfo->gen >= 6 || p->single_program_flow) {
1715 push_loop_stack(p, &p->store[p->nr_insn]);
1716 return &p->store[p->nr_insn];
1717 } else {
1718 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1719
1720 push_loop_stack(p, insn);
1721
1722 /* Override the defaults for this instruction:
1723 */
1724 brw_set_dest(p, insn, brw_null_reg());
1725 brw_set_src0(p, insn, brw_null_reg());
1726 brw_set_src1(p, insn, brw_null_reg());
1727
1728 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1729 brw_inst_set_exec_size(devinfo, insn, execute_size);
1730 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1731
1732 return insn;
1733 }
1734 }
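/* A minimal usage sketch for the loop helpers above (illustrative only,
 * not emitted anywhere in this file): the caller brackets the body with
 * brw_DO()/brw_WHILE() and may emit predicated BREAK/CONT inside it.
 *
 *    brw_DO(p, BRW_EXECUTE_8);        // no instruction emitted on Gen6+
 *    ...emit the loop body...
 *    brw_inst *brk = brw_BREAK(p);
 *    brw_inst_set_pred_control(p->devinfo, brk, BRW_PREDICATE_NORMAL);
 *    brw_WHILE(p);                    // jumps back to the top of the loop
 *
 * On Gen6+ the BREAK's JIP/UIP are filled in afterwards by
 * brw_set_uip_jip(); pre-Gen6 they are patched by brw_patch_break_cont()
 * when the WHILE is emitted.
 */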
1735
1736 /**
1737 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1738 * instruction here.
1739 *
1740 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1741 * nesting, since it can always just point to the end of the block/current loop.
1742 */
1743 static void
1744 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1745 {
1746 const struct gen_device_info *devinfo = p->devinfo;
1747 brw_inst *do_inst = get_inner_do_insn(p);
1748 brw_inst *inst;
1749 unsigned br = brw_jump_scale(devinfo);
1750
1751 assert(devinfo->gen < 6);
1752
1753 for (inst = while_inst - 1; inst != do_inst; inst--) {
1754 /* A nonzero jump count means that this instruction has already been
1755 * patched: it belongs to a loop nested inside the one we're
1756 * patching.
1757 */
1758 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1759 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1760 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1761 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1762 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1763 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1764 }
1765 }
1766 }
1767
1768 brw_inst *
1769 brw_WHILE(struct brw_codegen *p)
1770 {
1771 const struct gen_device_info *devinfo = p->devinfo;
1772 brw_inst *insn, *do_insn;
1773 unsigned br = brw_jump_scale(devinfo);
1774
1775 if (devinfo->gen >= 6) {
1776 insn = next_insn(p, BRW_OPCODE_WHILE);
1777 do_insn = get_inner_do_insn(p);
1778
1779 if (devinfo->gen >= 8) {
1780 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1781 brw_set_src0(p, insn, brw_imm_d(0));
1782 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1783 } else if (devinfo->gen == 7) {
1784 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1785 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1786 brw_set_src1(p, insn, brw_imm_w(0));
1787 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1788 } else {
1789 brw_set_dest(p, insn, brw_imm_w(0));
1790 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1791 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1792 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1793 }
1794
1795 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1796
1797 } else {
1798 if (p->single_program_flow) {
1799 insn = next_insn(p, BRW_OPCODE_ADD);
1800 do_insn = get_inner_do_insn(p);
1801
1802 brw_set_dest(p, insn, brw_ip_reg());
1803 brw_set_src0(p, insn, brw_ip_reg());
1804 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1805 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1806 } else {
1807 insn = next_insn(p, BRW_OPCODE_WHILE);
1808 do_insn = get_inner_do_insn(p);
1809
1810 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1811
1812 brw_set_dest(p, insn, brw_ip_reg());
1813 brw_set_src0(p, insn, brw_ip_reg());
1814 brw_set_src1(p, insn, brw_imm_d(0));
1815
1816 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1817 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1818 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1819
1820 brw_patch_break_cont(p, insn);
1821 }
1822 }
1823 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1824
1825 p->loop_stack_depth--;
1826
1827 return insn;
1828 }
1829
1830 /* FORWARD JUMPS:
1831 */
1832 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1833 {
1834 const struct gen_device_info *devinfo = p->devinfo;
1835 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1836 unsigned jmpi = 1;
1837
1838 if (devinfo->gen >= 5)
1839 jmpi = 2;
1840
1841 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1842 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1843
1844 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1845 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1846 }
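/* For example (illustrative numbers): on Gen5+ each 128-bit instruction
 * counts as jmpi = 2, so landing a JMPI that was emitted at index 5 when
 * p->nr_insn is 10 yields a jump count of 2 * (10 - 5 - 1) = 8, skipping
 * the four instructions between the JMPI and the current end of the
 * program.
 */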
1847
1848 /* To integrate with the above, it makes sense that the comparison
1849 * instruction should populate the flag register. It might be simpler
1850 * just to use the flag reg for most WM tasks?
1851 */
1852 void brw_CMP(struct brw_codegen *p,
1853 struct brw_reg dest,
1854 unsigned conditional,
1855 struct brw_reg src0,
1856 struct brw_reg src1)
1857 {
1858 const struct gen_device_info *devinfo = p->devinfo;
1859 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1860
1861 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1862 brw_set_dest(p, insn, dest);
1863 brw_set_src0(p, insn, src0);
1864 brw_set_src1(p, insn, src1);
1865
1866 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1867 * page says:
1868 * "Any CMP instruction with a null destination must use a {switch}."
1869 *
1870 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1871 * mentioned on their work-arounds pages.
1872 */
1873 if (devinfo->gen == 7) {
1874 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1875 dest.nr == BRW_ARF_NULL) {
1876 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1877 }
1878 }
1879 }
1880
1881 /***********************************************************************
1882 * Helpers for the various SEND message types:
1883 */
1884
1885 /** Extended math function, float[8].
1886 */
1887 void gen4_math(struct brw_codegen *p,
1888 struct brw_reg dest,
1889 unsigned function,
1890 unsigned msg_reg_nr,
1891 struct brw_reg src,
1892 unsigned precision )
1893 {
1894 const struct gen_device_info *devinfo = p->devinfo;
1895 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1896 unsigned data_type;
1897 if (has_scalar_region(src)) {
1898 data_type = BRW_MATH_DATA_SCALAR;
1899 } else {
1900 data_type = BRW_MATH_DATA_VECTOR;
1901 }
1902
1903 assert(devinfo->gen < 6);
1904
1905 /* Example code doesn't set predicate_control for send
1906 * instructions.
1907 */
1908 brw_inst_set_pred_control(devinfo, insn, 0);
1909 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1910
1911 brw_set_dest(p, insn, dest);
1912 brw_set_src0(p, insn, src);
1913 brw_set_math_message(p,
1914 insn,
1915 function,
1916 src.type == BRW_REGISTER_TYPE_D,
1917 precision,
1918 data_type);
1919 }
1920
1921 void gen6_math(struct brw_codegen *p,
1922 struct brw_reg dest,
1923 unsigned function,
1924 struct brw_reg src0,
1925 struct brw_reg src1)
1926 {
1927 const struct gen_device_info *devinfo = p->devinfo;
1928 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1929
1930 assert(devinfo->gen >= 6);
1931
1932 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1933 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1934
1935 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1936 if (devinfo->gen == 6) {
1937 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1938 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1939 }
1940
1941 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1942 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1943 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1944 assert(src0.type != BRW_REGISTER_TYPE_F);
1945 assert(src1.type != BRW_REGISTER_TYPE_F);
1946 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1947 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1948 } else {
1949 assert(src0.type == BRW_REGISTER_TYPE_F ||
1950 (src0.type == BRW_REGISTER_TYPE_HF && devinfo->gen >= 9));
1951 assert(src1.type == BRW_REGISTER_TYPE_F ||
1952 (src1.type == BRW_REGISTER_TYPE_HF && devinfo->gen >= 9));
1953 }
1954
1955 /* Source modifiers are ignored for extended math instructions on Gen6. */
1956 if (devinfo->gen == 6) {
1957 assert(!src0.negate);
1958 assert(!src0.abs);
1959 assert(!src1.negate);
1960 assert(!src1.abs);
1961 }
1962
1963 brw_inst_set_math_function(devinfo, insn, function);
1964
1965 brw_set_dest(p, insn, dest);
1966 brw_set_src0(p, insn, src0);
1967 brw_set_src1(p, insn, src1);
1968 }
1969
1970 /**
1971 * Return the right surface index to access the thread scratch space using
1972 * stateless dataport messages.
1973 */
1974 unsigned
1975 brw_scratch_surface_idx(const struct brw_codegen *p)
1976 {
1977 /* The scratch space is thread-local so IA coherency is unnecessary. */
1978 if (p->devinfo->gen >= 8)
1979 return GEN8_BTI_STATELESS_NON_COHERENT;
1980 else
1981 return BRW_BTI_STATELESS;
1982 }
1983
1984 /**
1985 * Write a block of OWORDs (half a GRF each) from the scratch buffer,
1986 * using a constant offset per channel.
1987 *
1988 * The offset must be aligned to oword size (16 bytes). Used for
1989 * register spilling.
1990 */
1991 void brw_oword_block_write_scratch(struct brw_codegen *p,
1992 struct brw_reg mrf,
1993 int num_regs,
1994 unsigned offset)
1995 {
1996 const struct gen_device_info *devinfo = p->devinfo;
1997 const unsigned target_cache =
1998 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1999 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2000 BRW_SFID_DATAPORT_WRITE);
2001 uint32_t msg_type;
2002
2003 if (devinfo->gen >= 6)
2004 offset /= 16;
2005
2006 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2007
2008 const unsigned mlen = 1 + num_regs;
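/* For example, spilling two GRFs gives mlen = 3: the message header in
 * the first register plus the two payload registers.
 */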
2009
2010 /* Set up the message header. This is g0, with g0.2 filled with
2011 * the offset. We don't want to leave our offset around in g0 or
2012 * it'll screw up texture samples, so set it up inside the message
2013 * reg.
2014 */
2015 {
2016 brw_push_insn_state(p);
2017 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2018 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2019 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2020
2021 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2022
2023 /* set message header global offset field (reg 0, element 2) */
2024 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2025 brw_MOV(p,
2026 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2027 mrf.nr,
2028 2), BRW_REGISTER_TYPE_UD),
2029 brw_imm_ud(offset));
2030
2031 brw_pop_insn_state(p);
2032 }
2033
2034 {
2035 struct brw_reg dest;
2036 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2037 int send_commit_msg;
2038 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
2039 BRW_REGISTER_TYPE_UW);
2040
2041 brw_inst_set_sfid(devinfo, insn, target_cache);
2042 brw_inst_set_compression(devinfo, insn, false);
2043
2044 if (brw_inst_exec_size(devinfo, insn) >= 16)
2045 src_header = vec16(src_header);
2046
2047 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2048 if (devinfo->gen < 6)
2049 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2050
2051 /* Until gen6, writes followed by reads from the same location
2052 * are not guaranteed to be ordered unless write_commit is set.
2053 * If set, then a no-op write is issued to the destination
2054 * register to set a dependency, and a read from the destination
2055 * can be used to ensure the ordering.
2056 *
2057 * For gen6, only writes between different threads need ordering
2058 * protection. Our use of DP writes is all about register
2059 * spilling within a thread.
2060 */
2061 if (devinfo->gen >= 6) {
2062 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2063 send_commit_msg = 0;
2064 } else {
2065 dest = src_header;
2066 send_commit_msg = 1;
2067 }
2068
2069 brw_set_dest(p, insn, dest);
2070 if (devinfo->gen >= 6) {
2071 brw_set_src0(p, insn, mrf);
2072 } else {
2073 brw_set_src0(p, insn, brw_null_reg());
2074 }
2075
2076 if (devinfo->gen >= 6)
2077 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2078 else
2079 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2080
2081 brw_set_desc(p, insn,
2082 brw_message_desc(devinfo, mlen, send_commit_msg, true) |
2083 brw_dp_write_desc(devinfo, brw_scratch_surface_idx(p),
2084 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2085 msg_type, 0, /* not a render target */
2086 send_commit_msg));
2087 }
2088 }
2089
2090
2091 /**
2092 * Read a block of owords (half a GRF each) from the scratch buffer
2093 * using a constant index per channel.
2094 *
2095 * Offset must be aligned to oword size (16 bytes). Used for register
2096 * spilling.
2097 */
2098 void
2099 brw_oword_block_read_scratch(struct brw_codegen *p,
2100 struct brw_reg dest,
2101 struct brw_reg mrf,
2102 int num_regs,
2103 unsigned offset)
2104 {
2105 const struct gen_device_info *devinfo = p->devinfo;
2106
2107 if (devinfo->gen >= 6)
2108 offset /= 16;
2109
2110 if (p->devinfo->gen >= 7) {
2111 /* On gen 7 and above, we no longer have message registers and we can
2112 * send from any register we want. By using the destination register
2113 * for the message, we guarantee that the implied message write won't
2114 * accidentally overwrite anything. This has been a problem because
2115 * the MRF registers and source for the final FB write are both fixed
2116 * and may overlap.
2117 */
2118 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2119 } else {
2120 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2121 }
2122 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2123
2124 const unsigned rlen = num_regs;
2125 const unsigned target_cache =
2126 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2127 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2128 BRW_SFID_DATAPORT_READ);
2129
2130 {
2131 brw_push_insn_state(p);
2132 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2133 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2134 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2135
2136 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2137
2138 /* set message header global offset field (reg 0, element 2) */
2139 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2140 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2141
2142 brw_pop_insn_state(p);
2143 }
2144
2145 {
2146 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2147
2148 brw_inst_set_sfid(devinfo, insn, target_cache);
2149 assert(brw_inst_pred_control(devinfo, insn) == 0);
2150 brw_inst_set_compression(devinfo, insn, false);
2151
2152 brw_set_dest(p, insn, dest); /* UW? */
2153 if (devinfo->gen >= 6) {
2154 brw_set_src0(p, insn, mrf);
2155 } else {
2156 brw_set_src0(p, insn, brw_null_reg());
2157 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2158 }
2159
2160 brw_set_desc(p, insn,
2161 brw_message_desc(devinfo, 1, rlen, true) |
2162 brw_dp_read_desc(devinfo, brw_scratch_surface_idx(p),
2163 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2164 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2165 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2166 }
2167 }
2168
2169 void
2170 gen7_block_read_scratch(struct brw_codegen *p,
2171 struct brw_reg dest,
2172 int num_regs,
2173 unsigned offset)
2174 {
2175 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2176 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2177
2178 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2179
2180 /* The HW requires the message header to be present; it supplies the
2181 * scratch offset in g0.5.
2182 */
2183 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2184
2185 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2186 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2187 * is 32 bytes, which happens to be the size of a register.
2188 */
2189 offset /= REG_SIZE;
2190 assert(offset < (1 << 12));
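/* For example, a byte offset of 4096 becomes HWord offset 4096 / 32 = 128,
 * comfortably within the 12-bit field.
 */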
2191
2192 gen7_set_dp_scratch_message(p, insn,
2193 false, /* scratch read */
2194 false, /* OWords */
2195 false, /* invalidate after read */
2196 num_regs,
2197 offset,
2198 1, /* mlen: just g0 */
2199 num_regs, /* rlen */
2200 true); /* header present */
2201 }
2202
2203 /**
2204 * Read float[4] vectors from the data port constant cache.
2205 * Location (in buffer) should be a multiple of 16.
2206 * Used for fetching shader constants.
2207 */
2208 void brw_oword_block_read(struct brw_codegen *p,
2209 struct brw_reg dest,
2210 struct brw_reg mrf,
2211 uint32_t offset,
2212 uint32_t bind_table_index)
2213 {
2214 const struct gen_device_info *devinfo = p->devinfo;
2215 const unsigned target_cache =
2216 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2217 BRW_SFID_DATAPORT_READ);
2218 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2219
2220 /* On newer hardware, offset is in units of owords. */
2221 if (devinfo->gen >= 6)
2222 offset /= 16;
2223
2224 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2225
2226 brw_push_insn_state(p);
2227 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2228 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2229 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2230
2231 brw_push_insn_state(p);
2232 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2233 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2234
2235 /* set message header global offset field (reg 0, element 2) */
2236 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2237 brw_MOV(p,
2238 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2239 mrf.nr,
2240 2), BRW_REGISTER_TYPE_UD),
2241 brw_imm_ud(offset));
2242 brw_pop_insn_state(p);
2243
2244 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2245
2246 brw_inst_set_sfid(devinfo, insn, target_cache);
2247
2248 /* cast dest to a uword[8] vector */
2249 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2250
2251 brw_set_dest(p, insn, dest);
2252 if (devinfo->gen >= 6) {
2253 brw_set_src0(p, insn, mrf);
2254 } else {
2255 brw_set_src0(p, insn, brw_null_reg());
2256 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2257 }
2258
2259 brw_set_desc(p, insn,
2260 brw_message_desc(devinfo, 1, DIV_ROUND_UP(exec_size, 8), true) |
2261 brw_dp_read_desc(devinfo, bind_table_index,
2262 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2263 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2264 BRW_DATAPORT_READ_TARGET_DATA_CACHE));
2265
2266 brw_pop_insn_state(p);
2267 }
2268
2269 brw_inst *
2270 brw_fb_WRITE(struct brw_codegen *p,
2271 struct brw_reg payload,
2272 struct brw_reg implied_header,
2273 unsigned msg_control,
2274 unsigned binding_table_index,
2275 unsigned msg_length,
2276 unsigned response_length,
2277 bool eot,
2278 bool last_render_target,
2279 bool header_present)
2280 {
2281 const struct gen_device_info *devinfo = p->devinfo;
2282 const unsigned target_cache =
2283 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2284 BRW_SFID_DATAPORT_WRITE);
2285 brw_inst *insn;
2286 unsigned msg_type;
2287 struct brw_reg dest, src0;
2288
2289 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2290 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2291 else
2292 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2293
2294 if (devinfo->gen >= 6) {
2295 insn = next_insn(p, BRW_OPCODE_SENDC);
2296 } else {
2297 insn = next_insn(p, BRW_OPCODE_SEND);
2298 }
2299 brw_inst_set_sfid(devinfo, insn, target_cache);
2300 brw_inst_set_compression(devinfo, insn, false);
2301
2302 if (devinfo->gen >= 6) {
2303 /* headerless version, just submit color payload */
2304 src0 = payload;
2305
2306 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2307 } else {
2308 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2309 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2310 src0 = implied_header;
2311
2312 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2313 }
2314
2315 brw_set_dest(p, insn, dest);
2316 brw_set_src0(p, insn, src0);
2317 brw_set_desc(p, insn,
2318 brw_message_desc(devinfo, msg_length, response_length,
2319 header_present) |
2320 brw_dp_write_desc(devinfo, binding_table_index, msg_control,
2321 msg_type, last_render_target,
2322 0 /* send_commit_msg */));
2323 brw_inst_set_eot(devinfo, insn, eot);
2324
2325 return insn;
2326 }
2327
2328 brw_inst *
2329 gen9_fb_READ(struct brw_codegen *p,
2330 struct brw_reg dst,
2331 struct brw_reg payload,
2332 unsigned binding_table_index,
2333 unsigned msg_length,
2334 unsigned response_length,
2335 bool per_sample)
2336 {
2337 const struct gen_device_info *devinfo = p->devinfo;
2338 assert(devinfo->gen >= 9);
2339 const unsigned msg_subtype =
2340 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2341 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2342
2343 brw_inst_set_sfid(devinfo, insn, GEN6_SFID_DATAPORT_RENDER_CACHE);
2344 brw_set_dest(p, insn, dst);
2345 brw_set_src0(p, insn, payload);
2346 brw_set_desc(
2347 p, insn,
2348 brw_message_desc(devinfo, msg_length, response_length, true) |
2349 brw_dp_read_desc(devinfo, binding_table_index,
2350 per_sample << 5 | msg_subtype,
2351 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2352 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2353 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2354
2355 return insn;
2356 }
2357
2358 /**
2359 * Texture sample instruction.
2360 * Note: the msg_type plus msg_length values determine exactly what kind
2361 * of sampling operation is performed. See volume 4, page 161 of docs.
2362 */
2363 void brw_SAMPLE(struct brw_codegen *p,
2364 struct brw_reg dest,
2365 unsigned msg_reg_nr,
2366 struct brw_reg src0,
2367 unsigned binding_table_index,
2368 unsigned sampler,
2369 unsigned msg_type,
2370 unsigned response_length,
2371 unsigned msg_length,
2372 unsigned header_present,
2373 unsigned simd_mode,
2374 unsigned return_format)
2375 {
2376 const struct gen_device_info *devinfo = p->devinfo;
2377 brw_inst *insn;
2378
2379 if (msg_reg_nr != -1)
2380 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2381
2382 insn = next_insn(p, BRW_OPCODE_SEND);
2383 brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
2384 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2385
2386 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2387 *
2388 * "Instruction compression is not allowed for this instruction (that
2389 * is, send). The hardware behavior is undefined if this instruction is
2390 * set as compressed. However, compress control can be set to "SecHalf"
2391 * to affect the EMask generation."
2392 *
2393 * No similar wording is found in later PRMs, but there are examples
2394 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2395 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2396 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2397 */
2398 brw_inst_set_compression(devinfo, insn, false);
2399
2400 if (devinfo->gen < 6)
2401 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2402
2403 brw_set_dest(p, insn, dest);
2404 brw_set_src0(p, insn, src0);
2405 brw_set_desc(p, insn,
2406 brw_message_desc(devinfo, msg_length, response_length,
2407 header_present) |
2408 brw_sampler_desc(devinfo, binding_table_index, sampler,
2409 msg_type, simd_mode, return_format));
2410 }
2411
2412 /* Adjust the message header's sampler state pointer to
2413 * select the correct group of 16 samplers.
2414 */
2415 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2416 struct brw_reg header,
2417 struct brw_reg sampler_index)
2418 {
2419 /* The "Sampler Index" field can only store values between 0 and 15.
2420 * However, we can add an offset to the "Sampler State Pointer"
2421 * field, effectively selecting a different set of 16 samplers.
2422 *
2423 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2424 * offset, and each sampler state is only 16 bytes, so we can't
2425 * exclusively use the offset - we have to use both.
2426 */
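/* Worked example (a sketch): an immediate sampler_index of 20 takes the
 * ADD path below with an offset of 16 * (20 / 16) * 16 = 256 bytes, i.e.
 * the pointer advances by one group of 16 sampler states. Selecting
 * 20 % 16 = 4 within that group is assumed to happen in the 4-bit
 * "Sampler Index" field where the sampler descriptor is built.
 */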
2427
2428 const struct gen_device_info *devinfo = p->devinfo;
2429
2430 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2431 const int sampler_state_size = 16; /* 16 bytes */
2432 uint32_t sampler = sampler_index.ud;
2433
2434 if (sampler >= 16) {
2435 assert(devinfo->is_haswell || devinfo->gen >= 8);
2436 brw_ADD(p,
2437 get_element_ud(header, 3),
2438 get_element_ud(brw_vec8_grf(0, 0), 3),
2439 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2440 }
2441 } else {
2442 /* Non-const sampler array indexing case */
2443 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2444 return;
2445 }
2446
2447 struct brw_reg temp = get_element_ud(header, 3);
2448
2449 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2450 brw_SHL(p, temp, temp, brw_imm_ud(4));
2451 brw_ADD(p,
2452 get_element_ud(header, 3),
2453 get_element_ud(brw_vec8_grf(0, 0), 3),
2454 temp);
2455 }
2456 }
2457
2458 /* All these variables are pretty confusing - we might be better off
2459 * using bitmasks and macros for this, in the old style. Or perhaps
2460 * just having the caller instantiate the fields in dword3 itself.
2461 */
2462 void brw_urb_WRITE(struct brw_codegen *p,
2463 struct brw_reg dest,
2464 unsigned msg_reg_nr,
2465 struct brw_reg src0,
2466 enum brw_urb_write_flags flags,
2467 unsigned msg_length,
2468 unsigned response_length,
2469 unsigned offset,
2470 unsigned swizzle)
2471 {
2472 const struct gen_device_info *devinfo = p->devinfo;
2473 brw_inst *insn;
2474
2475 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2476
2477 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2478 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2479 brw_push_insn_state(p);
2480 brw_set_default_access_mode(p, BRW_ALIGN_1);
2481 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2482 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2483 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2484 BRW_REGISTER_TYPE_UD),
2485 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2486 brw_imm_ud(0xff00));
2487 brw_pop_insn_state(p);
2488 }
2489
2490 insn = next_insn(p, BRW_OPCODE_SEND);
2491
2492 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2493
2494 brw_set_dest(p, insn, dest);
2495 brw_set_src0(p, insn, src0);
2496 brw_set_src1(p, insn, brw_imm_d(0));
2497
2498 if (devinfo->gen < 6)
2499 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2500
2501 brw_set_urb_message(p,
2502 insn,
2503 flags,
2504 msg_length,
2505 response_length,
2506 offset,
2507 swizzle);
2508 }
2509
2510 void
2511 brw_send_indirect_message(struct brw_codegen *p,
2512 unsigned sfid,
2513 struct brw_reg dst,
2514 struct brw_reg payload,
2515 struct brw_reg desc,
2516 unsigned desc_imm,
2517 bool eot)
2518 {
2519 const struct gen_device_info *devinfo = p->devinfo;
2520 struct brw_inst *send;
2521
2522 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2523
2524 assert(desc.type == BRW_REGISTER_TYPE_UD);
2525
2526 if (desc.file == BRW_IMMEDIATE_VALUE) {
2527 send = next_insn(p, BRW_OPCODE_SEND);
2528 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2529 brw_set_desc(p, send, desc.ud | desc_imm);
2530 } else {
2531 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2532
2533 brw_push_insn_state(p);
2534 brw_set_default_access_mode(p, BRW_ALIGN_1);
2535 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2536 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2537 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2538
2539 /* Load the indirect descriptor to an address register using OR so the
2540 * caller can specify additional descriptor bits with the desc_imm
2541 * immediate.
2542 */
2543 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2544
2545 brw_pop_insn_state(p);
2546
2547 send = next_insn(p, BRW_OPCODE_SEND);
2548 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2549 brw_set_src1(p, send, addr);
2550 }
2551
2552 brw_set_dest(p, send, dst);
2553 brw_inst_set_sfid(devinfo, send, sfid);
2554 brw_inst_set_eot(devinfo, send, eot);
2555 }
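/* A minimal usage sketch (hypothetical registers and descriptor): send a
 * one-register payload from g1 with a descriptor held in g2.0, letting
 * the helper OR it into a0.0 before the SEND:
 *
 *    brw_send_indirect_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
 *                              retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
 *                              brw_vec8_grf(1, 0),
 *                              retype(brw_vec1_grf(2, 0), BRW_REGISTER_TYPE_UD),
 *                              brw_message_desc(p->devinfo, 1, 0, true),
 *                              false);
 */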
2556
2557 void
2558 brw_send_indirect_split_message(struct brw_codegen *p,
2559 unsigned sfid,
2560 struct brw_reg dst,
2561 struct brw_reg payload0,
2562 struct brw_reg payload1,
2563 struct brw_reg desc,
2564 unsigned desc_imm,
2565 struct brw_reg ex_desc,
2566 unsigned ex_desc_imm,
2567 bool eot)
2568 {
2569 const struct gen_device_info *devinfo = p->devinfo;
2570 struct brw_inst *send;
2571
2572 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2573
2574 assert(desc.type == BRW_REGISTER_TYPE_UD);
2575
2576 if (desc.file == BRW_IMMEDIATE_VALUE) {
2577 desc.ud |= desc_imm;
2578 } else {
2579 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2580
2581 brw_push_insn_state(p);
2582 brw_set_default_access_mode(p, BRW_ALIGN_1);
2583 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2584 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2585 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2586
2587 /* Load the indirect descriptor to an address register using OR so the
2588 * caller can specify additional descriptor bits with the desc_imm
2589 * immediate.
2590 */
2591 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2592
2593 brw_pop_insn_state(p);
2594 desc = addr;
2595 }
2596
2597 if (ex_desc.file == BRW_IMMEDIATE_VALUE &&
2598 (ex_desc.ud & INTEL_MASK(15, 12)) == 0) {
2599 ex_desc.ud |= ex_desc_imm;
2600 } else {
2601 struct brw_reg addr = retype(brw_address_reg(2), BRW_REGISTER_TYPE_UD);
2602
2603 brw_push_insn_state(p);
2604 brw_set_default_access_mode(p, BRW_ALIGN_1);
2605 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2606 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2607 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2608
2609 /* Load the indirect extended descriptor to an address register using OR
2610 * so the caller can specify additional descriptor bits with the
2611 * ex_desc_imm immediate.
2612 *
2613 * Even though the instruction dispatcher always pulls the SFID and EOT
2614 * fields from the instruction itself, the actual external unit that
2615 * processes the message gets the SFID and EOT from the extended
2616 * descriptor, which comes from the address register. If we don't OR
2617 * those fields in, the external unit may get confused and hang.
2618 */
2619 unsigned imm_part = ex_desc_imm | sfid | eot << 5;
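/* e.g. with eot = 1 this ORs sfid | 0x20 into whatever ex_desc_imm bits
 * the caller provided, matching the layout used here: SFID in the low
 * bits of the extended descriptor and EOT in bit 5.
 */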
2620
2621 if (ex_desc.file == BRW_IMMEDIATE_VALUE) {
2622 /* ex_desc bits 15:12 don't exist in the instruction encoding, so
2623 * we may have fallen back to an indirect extended descriptor.
2624 */
2625 brw_MOV(p, addr, brw_imm_ud(ex_desc.ud | imm_part));
2626 } else {
2627 brw_OR(p, addr, ex_desc, brw_imm_ud(imm_part));
2628 }
2629
2630 brw_pop_insn_state(p);
2631 ex_desc = addr;
2632 }
2633
2634 send = next_insn(p, BRW_OPCODE_SENDS);
2635 brw_set_dest(p, send, dst);
2636 brw_set_src0(p, send, retype(payload0, BRW_REGISTER_TYPE_UD));
2637 brw_set_src1(p, send, retype(payload1, BRW_REGISTER_TYPE_UD));
2638
2639 if (desc.file == BRW_IMMEDIATE_VALUE) {
2640 brw_inst_set_send_sel_reg32_desc(devinfo, send, 0);
2641 brw_inst_set_send_desc(devinfo, send, desc.ud);
2642 } else {
2643 assert(desc.file == BRW_ARCHITECTURE_REGISTER_FILE);
2644 assert(desc.nr == BRW_ARF_ADDRESS);
2645 assert(desc.subnr == 0);
2646 brw_inst_set_send_sel_reg32_desc(devinfo, send, 1);
2647 }
2648
2649 if (ex_desc.file == BRW_IMMEDIATE_VALUE) {
2650 brw_inst_set_send_sel_reg32_ex_desc(devinfo, send, 0);
2651 brw_inst_set_send_ex_desc(devinfo, send, ex_desc.ud);
2652 } else {
2653 assert(ex_desc.file == BRW_ARCHITECTURE_REGISTER_FILE);
2654 assert(ex_desc.nr == BRW_ARF_ADDRESS);
2655 assert((ex_desc.subnr & 0x3) == 0);
2656 brw_inst_set_send_sel_reg32_ex_desc(devinfo, send, 1);
2657 brw_inst_set_send_ex_desc_ia_subreg_nr(devinfo, send, ex_desc.subnr >> 2);
2658 }
2659
2660 brw_inst_set_sfid(devinfo, send, sfid);
2661 brw_inst_set_eot(devinfo, send, eot);
2662 }
2663
2664 static void
2665 brw_send_indirect_surface_message(struct brw_codegen *p,
2666 unsigned sfid,
2667 struct brw_reg dst,
2668 struct brw_reg payload,
2669 struct brw_reg surface,
2670 unsigned desc_imm)
2671 {
2672 if (surface.file != BRW_IMMEDIATE_VALUE) {
2673 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2674
2675 brw_push_insn_state(p);
2676 brw_set_default_access_mode(p, BRW_ALIGN_1);
2677 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2678 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2679 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2680
2681 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2682 * some surface array is accessed out of bounds.
2683 */
2684 brw_AND(p, addr,
2685 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2686 BRW_GET_SWZ(surface.swizzle, 0)),
2687 brw_imm_ud(0xff));
2688
2689 brw_pop_insn_state(p);
2690
2691 surface = addr;
2692 }
2693
2694 brw_send_indirect_message(p, sfid, dst, payload, surface, desc_imm, false);
2695 }
2696
2697 static bool
2698 while_jumps_before_offset(const struct gen_device_info *devinfo,
2699 brw_inst *insn, int while_offset, int start_offset)
2700 {
2701 int scale = 16 / brw_jump_scale(devinfo);
2702 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2703 : brw_inst_jip(devinfo, insn);
2704 assert(jip < 0);
2705 return while_offset + jip * scale <= start_offset;
2706 }
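/* Worked example (illustrative numbers): on Gen7, brw_jump_scale() is 2,
 * so scale = 16 / 2 = 8 bytes per JIP unit. A WHILE at byte offset 160
 * whose JIP is -10 targets 160 + (-10 * 8) = 80, so it is considered to
 * jump before any start_offset of 80 or more.
 */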
2707
2708
2709 static int
2710 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2711 {
2712 int offset;
2713 void *store = p->store;
2714 const struct gen_device_info *devinfo = p->devinfo;
2715
2716 int depth = 0;
2717
2718 for (offset = next_offset(devinfo, store, start_offset);
2719 offset < p->next_insn_offset;
2720 offset = next_offset(devinfo, store, offset)) {
2721 brw_inst *insn = store + offset;
2722
2723 switch (brw_inst_opcode(devinfo, insn)) {
2724 case BRW_OPCODE_IF:
2725 depth++;
2726 break;
2727 case BRW_OPCODE_ENDIF:
2728 if (depth == 0)
2729 return offset;
2730 depth--;
2731 break;
2732 case BRW_OPCODE_WHILE:
2733 /* If the while doesn't jump before our instruction, it's the end
2734 * of a sibling do...while loop. Ignore it.
2735 */
2736 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2737 continue;
2738 /* fallthrough */
2739 case BRW_OPCODE_ELSE:
2740 case BRW_OPCODE_HALT:
2741 if (depth == 0)
2742 return offset;
2743 }
2744 }
2745
2746 return 0;
2747 }
2748
2749 /* There is no DO instruction on gen6, so to find the end of the loop
2750 * we have to see if the loop is jumping back before our start
2751 * instruction.
2752 */
2753 static int
2754 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2755 {
2756 const struct gen_device_info *devinfo = p->devinfo;
2757 int offset;
2758 void *store = p->store;
2759
2760 assert(devinfo->gen >= 6);
2761
2762 /* Always start after the instruction (such as a WHILE) we're trying to fix
2763 * up.
2764 */
2765 for (offset = next_offset(devinfo, store, start_offset);
2766 offset < p->next_insn_offset;
2767 offset = next_offset(devinfo, store, offset)) {
2768 brw_inst *insn = store + offset;
2769
2770 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2771 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2772 return offset;
2773 }
2774 }
2775 assert(!"not reached");
2776 return start_offset;
2777 }
2778
2779 /* After program generation, go back and update the UIP and JIP of
2780 * BREAK, CONT, and HALT instructions to their correct locations.
2781 */
2782 void
2783 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2784 {
2785 const struct gen_device_info *devinfo = p->devinfo;
2786 int offset;
2787 int br = brw_jump_scale(devinfo);
2788 int scale = 16 / br;
2789 void *store = p->store;
2790
2791 if (devinfo->gen < 6)
2792 return;
2793
2794 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2795 brw_inst *insn = store + offset;
2796 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2797
2798 int block_end_offset = brw_find_next_block_end(p, offset);
2799 switch (brw_inst_opcode(devinfo, insn)) {
2800 case BRW_OPCODE_BREAK:
2801 assert(block_end_offset != 0);
2802 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2803 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2804 brw_inst_set_uip(devinfo, insn,
2805 (brw_find_loop_end(p, offset) - offset +
2806 (devinfo->gen == 6 ? 16 : 0)) / scale);
2807 break;
2808 case BRW_OPCODE_CONTINUE:
2809 assert(block_end_offset != 0);
2810 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2811 brw_inst_set_uip(devinfo, insn,
2812 (brw_find_loop_end(p, offset) - offset) / scale);
2813
2814 assert(brw_inst_uip(devinfo, insn) != 0);
2815 assert(brw_inst_jip(devinfo, insn) != 0);
2816 break;
2817
2818 case BRW_OPCODE_ENDIF: {
2819 int32_t jump = (block_end_offset == 0) ?
2820 1 * br : (block_end_offset - offset) / scale;
2821 if (devinfo->gen >= 7)
2822 brw_inst_set_jip(devinfo, insn, jump);
2823 else
2824 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2825 break;
2826 }
2827
2828 case BRW_OPCODE_HALT:
2829 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2830 *
2831 * "In case of the halt instruction not inside any conditional
2832 * code block, the value of <JIP> and <UIP> should be the
2833 * same. In case of the halt instruction inside conditional code
2834 * block, the <UIP> should be the end of the program, and the
2835 * <JIP> should be end of the most inner conditional code block."
2836 *
2837 * The uip will have already been set by whoever set up the
2838 * instruction.
2839 */
2840 if (block_end_offset == 0) {
2841 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2842 } else {
2843 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2844 }
2845 assert(brw_inst_uip(devinfo, insn) != 0);
2846 assert(brw_inst_jip(devinfo, insn) != 0);
2847 break;
2848 }
2849 }
2850 }
2851
2852 void brw_ff_sync(struct brw_codegen *p,
2853 struct brw_reg dest,
2854 unsigned msg_reg_nr,
2855 struct brw_reg src0,
2856 bool allocate,
2857 unsigned response_length,
2858 bool eot)
2859 {
2860 const struct gen_device_info *devinfo = p->devinfo;
2861 brw_inst *insn;
2862
2863 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2864
2865 insn = next_insn(p, BRW_OPCODE_SEND);
2866 brw_set_dest(p, insn, dest);
2867 brw_set_src0(p, insn, src0);
2868 brw_set_src1(p, insn, brw_imm_d(0));
2869
2870 if (devinfo->gen < 6)
2871 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2872
2873 brw_set_ff_sync_message(p,
2874 insn,
2875 allocate,
2876 response_length,
2877 eot);
2878 }
2879
2880 /**
2881 * Emit the SEND instruction necessary to generate stream output data on Gen6
2882 * (for transform feedback).
2883 *
2884 * If send_commit_msg is true, this is the last piece of stream output data
2885 * from this thread, so send the data as a committed write. According to the
2886 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2887 *
2888 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2889 * writes are complete by sending the final write as a committed write."
2890 */
2891 void
2892 brw_svb_write(struct brw_codegen *p,
2893 struct brw_reg dest,
2894 unsigned msg_reg_nr,
2895 struct brw_reg src0,
2896 unsigned binding_table_index,
2897 bool send_commit_msg)
2898 {
2899 const struct gen_device_info *devinfo = p->devinfo;
2900 const unsigned target_cache =
2901 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2902 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2903 BRW_SFID_DATAPORT_WRITE);
2904 brw_inst *insn;
2905
2906 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2907
2908 insn = next_insn(p, BRW_OPCODE_SEND);
2909 brw_inst_set_sfid(devinfo, insn, target_cache);
2910 brw_set_dest(p, insn, dest);
2911 brw_set_src0(p, insn, src0);
2912 brw_set_desc(p, insn,
2913 brw_message_desc(devinfo, 1, send_commit_msg, true) |
2914 brw_dp_write_desc(devinfo, binding_table_index,
2915 0, /* msg_control: ignored */
2916 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2917 0, /* last_render_target: ignored */
2918 send_commit_msg)); /* send_commit_msg */
2919 }
2920
2921 static unsigned
2922 brw_surface_payload_size(struct brw_codegen *p,
2923 unsigned num_channels,
2924 unsigned exec_size /**< 0 for SIMD4x2 */)
2925 {
2926 if (exec_size == 0)
2927 return 1; /* SIMD4x2 */
2928 else if (exec_size <= 8)
2929 return num_channels;
2930 else
2931 return 2 * num_channels;
2932 }
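/* For example, a SIMD16 untyped read returning num_channels = 4 needs
 * 2 * 4 = 8 response registers, while the SIMD4x2 variants always pack
 * their response into a single register.
 */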
2933
2934 void
2935 brw_untyped_atomic(struct brw_codegen *p,
2936 struct brw_reg dst,
2937 struct brw_reg payload,
2938 struct brw_reg surface,
2939 unsigned atomic_op,
2940 unsigned msg_length,
2941 bool response_expected,
2942 bool header_present)
2943 {
2944 const struct gen_device_info *devinfo = p->devinfo;
2945 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2946 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2947 GEN7_SFID_DATAPORT_DATA_CACHE);
2948 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2949 /* SIMD4x2 untyped atomic instructions only exist on HSW+ */
2950 const bool has_simd4x2 = devinfo->gen >= 8 || devinfo->is_haswell;
2951 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) :
2952 has_simd4x2 ? 0 : 8;
2953 const unsigned response_length =
2954 brw_surface_payload_size(p, response_expected, exec_size);
2955 const unsigned desc =
2956 brw_message_desc(devinfo, msg_length, response_length, header_present) |
2957 brw_dp_untyped_atomic_desc(devinfo, exec_size, atomic_op,
2958 response_expected);
2959 /* Mask out unused components -- This is especially important in Align16
2960 * mode on generations that don't have native support for SIMD4x2 atomics,
2961 * because unused but enabled components will cause the dataport to perform
2962 * additional atomic operations on the addresses that happen to be in the
2963 * uninitialized Y, Z and W coordinates of the payload.
2964 */
2965 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2966
2967 brw_send_indirect_surface_message(p, sfid, brw_writemask(dst, mask),
2968 payload, surface, desc);
2969 }
2970
2971 void
2972 brw_untyped_surface_read(struct brw_codegen *p,
2973 struct brw_reg dst,
2974 struct brw_reg payload,
2975 struct brw_reg surface,
2976 unsigned msg_length,
2977 unsigned num_channels)
2978 {
2979 const struct gen_device_info *devinfo = p->devinfo;
2980 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2981 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2982 GEN7_SFID_DATAPORT_DATA_CACHE);
2983 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2984 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) : 0;
2985 const unsigned response_length =
2986 brw_surface_payload_size(p, num_channels, exec_size);
2987 const unsigned desc =
2988 brw_message_desc(devinfo, msg_length, response_length, false) |
2989 brw_dp_untyped_surface_rw_desc(devinfo, exec_size, num_channels, false);
2990
2991 brw_send_indirect_surface_message(p, sfid, dst, payload, surface, desc);
2992 }
2993
2994 void
2995 brw_untyped_surface_write(struct brw_codegen *p,
2996 struct brw_reg payload,
2997 struct brw_reg surface,
2998 unsigned msg_length,
2999 unsigned num_channels,
3000 bool header_present)
3001 {
3002 const struct gen_device_info *devinfo = p->devinfo;
3003 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3004 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3005 GEN7_SFID_DATAPORT_DATA_CACHE);
3006 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3007 /* SIMD4x2 untyped surface write instructions only exist on HSW+ */
3008 const bool has_simd4x2 = devinfo->gen >= 8 || devinfo->is_haswell;
3009 const unsigned exec_size = align1 ? 1 << brw_get_default_exec_size(p) :
3010 has_simd4x2 ? 0 : 8;
3011 const unsigned desc =
3012 brw_message_desc(devinfo, msg_length, 0, header_present) |
3013 brw_dp_untyped_surface_rw_desc(devinfo, exec_size, num_channels, true);
3014 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3015 const unsigned mask = !has_simd4x2 && !align1 ? WRITEMASK_X : WRITEMASK_XYZW;
3016
3017 brw_send_indirect_surface_message(p, sfid, brw_writemask(brw_null_reg(), mask),
3018 payload, surface, desc);
3019 }
3020
3021 static void
3022 brw_set_memory_fence_message(struct brw_codegen *p,
3023 struct brw_inst *insn,
3024 enum brw_message_target sfid,
3025 bool commit_enable,
3026 unsigned bti)
3027 {
3028 const struct gen_device_info *devinfo = p->devinfo;
3029
3030 brw_set_desc(p, insn, brw_message_desc(
3031 devinfo, 1, (commit_enable ? 1 : 0), true));
3032
3033 brw_inst_set_sfid(devinfo, insn, sfid);
3034
3035 switch (sfid) {
3036 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3037 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3038 break;
3039 case GEN7_SFID_DATAPORT_DATA_CACHE:
3040 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3041 break;
3042 default:
3043 unreachable("Not reached");
3044 }
3045
3046 if (commit_enable)
3047 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3048
3049 assert(devinfo->gen >= 11 || bti == 0);
3050 brw_inst_set_binding_table_index(devinfo, insn, bti);
3051 }
3052
3053 void
3054 brw_memory_fence(struct brw_codegen *p,
3055 struct brw_reg dst,
3056 struct brw_reg src,
3057 enum opcode send_op,
3058 bool stall,
3059 unsigned bti)
3060 {
3061 const struct gen_device_info *devinfo = p->devinfo;
3062 const bool commit_enable = stall ||
3063 devinfo->gen >= 10 || /* HSD ES # 1404612949 */
3064 (devinfo->gen == 7 && !devinfo->is_haswell);
3065 struct brw_inst *insn;
3066
3067 brw_push_insn_state(p);
3068 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3069 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3070 dst = retype(vec1(dst), BRW_REGISTER_TYPE_UW);
3071 src = retype(vec1(src), BRW_REGISTER_TYPE_UD);
3072
3073 /* Set dst as destination for dependency tracking, the MEMORY_FENCE
3074 * message doesn't write anything back.
3075 */
3076 insn = next_insn(p, send_op);
3077 brw_set_dest(p, insn, dst);
3078 brw_set_src0(p, insn, src);
3079 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3080 commit_enable, bti);
3081
3082 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3083 /* IVB does typed surface access through the render cache, so we need to
3084 * flush it too. Use a different register so both flushes can be
3085 * pipelined by the hardware.
3086 */
3087 insn = next_insn(p, send_op);
3088 brw_set_dest(p, insn, offset(dst, 1));
3089 brw_set_src0(p, insn, src);
3090 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3091 commit_enable, bti);
3092
3093 /* Now write the response of the second message into the response of the
3094 * first to trigger a pipeline stall -- This way future render and data
3095 * cache messages will be properly ordered with respect to past data and
3096 * render cache messages.
3097 */
3098 brw_MOV(p, dst, offset(dst, 1));
3099 }
3100
3101 if (stall)
3102 brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW), dst);
3103
3104 brw_pop_insn_state(p);
3105 }
3106
3107 void
3108 brw_pixel_interpolator_query(struct brw_codegen *p,
3109 struct brw_reg dest,
3110 struct brw_reg mrf,
3111 bool noperspective,
3112 unsigned mode,
3113 struct brw_reg data,
3114 unsigned msg_length,
3115 unsigned response_length)
3116 {
3117 const struct gen_device_info *devinfo = p->devinfo;
3118 const uint16_t exec_size = brw_get_default_exec_size(p);
3119 const unsigned slot_group = brw_get_default_group(p) / 16;
3120 const unsigned simd_mode = (exec_size == BRW_EXECUTE_16);
3121 const unsigned desc =
3122 brw_message_desc(devinfo, msg_length, response_length, false) |
3123 brw_pixel_interp_desc(devinfo, mode, noperspective, simd_mode,
3124 slot_group);
3125
3126 /* brw_send_indirect_message will automatically use a direct send message
3127 * if data is actually immediate.
3128 */
3129 brw_send_indirect_message(p,
3130 GEN7_SFID_PIXEL_INTERPOLATOR,
3131 dest,
3132 mrf,
3133 vec1(data),
3134 desc,
3135 false);
3136 }
3137
3138 void
3139 brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
3140 struct brw_reg mask)
3141 {
3142 const struct gen_device_info *devinfo = p->devinfo;
3143 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
3144 const unsigned qtr_control = brw_get_default_group(p) / 8;
3145 brw_inst *inst;
3146
3147 assert(devinfo->gen >= 7);
3148 assert(mask.type == BRW_REGISTER_TYPE_UD);
3149
3150 brw_push_insn_state(p);
3151
3152 /* The flag register is only used on Gen7 in align1 mode, so to avoid
3153 * setting unnecessary bits in the instruction words, grab the information
3154 * we need and then reset the default flag register. This allows more
3155 * instructions to be compacted.
3156 */
3157 const unsigned flag_subreg = p->current->flag_subreg;
3158 brw_set_default_flag_reg(p, 0, 0);
3159
3160 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3161 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3162
3163 if (devinfo->gen >= 8) {
3164 /* Getting the first active channel index is easy on Gen8: Just find
3165 * the first bit set in the execution mask. The register exists on
3166 * HSW already but it reads back as all ones when the current
3167 * instruction has execution masking disabled, so it's kind of
3168 * useless.
3169 */
3170 struct brw_reg exec_mask =
3171 retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);
3172
3173 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3174 if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
3175 /* Unfortunately, ce0 does not take into account the thread
3176 * dispatch mask, which may be a problem in cases where it's not
3177 * tightly packed (i.e. it doesn't have the form '2^n - 1' for
3178 * some n). Combine ce0 with the given dispatch (or vector) mask
3179 * to mask off those channels which were never dispatched by the
3180 * hardware.
3181 */
3182 brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
3183 brw_AND(p, vec1(dst), exec_mask, vec1(dst));
3184 exec_mask = vec1(dst);
3185 }
3186
3187 /* Quarter control has the effect of magically shifting the value of
3188 * ce0 so you'll get the first active channel relative to the
3189 * specified quarter control as result.
3190 */
3191 inst = brw_FBL(p, vec1(dst), exec_mask);
3192 } else {
3193 const struct brw_reg flag = brw_flag_subreg(flag_subreg);
3194
3195 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3196 brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
3197
3198 /* Run enough instructions returning zero with execution masking and
3199 * a conditional modifier enabled in order to get the full execution
3200 * mask in f1.0. We could use a single 32-wide move here if it
3201 * weren't for the hardware bug that causes channel enables to
3202 * be applied incorrectly to the second half of 32-wide instructions
3203 * on Gen7.
3204 */
3205 const unsigned lower_size = MIN2(16, exec_size);
3206 for (unsigned i = 0; i < exec_size / lower_size; i++) {
3207 inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
3208 brw_imm_uw(0));
3209 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
3210 brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
3211 brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
3212 brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
3213 brw_inst_set_flag_reg_nr(devinfo, inst, flag_subreg / 2);
3214 brw_inst_set_flag_subreg_nr(devinfo, inst, flag_subreg % 2);
3215 }

         /* Find the first bit set in the exec_size-wide portion of the flag
          * register that was updated by the last sequence of MOV
          * instructions.
          */
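         /* For example (illustrative): for a SIMD16 instruction in the
          * second half of the dispatch (qtr_control == 2) the MOVs above
          * update flag bits 16..31, so the FBL below reads the UW at byte
          * offset 2 of the flag register.
          */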
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register. Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination first without and then with execution
          * masking in order to find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}
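
/* Usage sketch (hypothetical caller; register numbers are illustrative):
 * find the index of the first live channel into a scalar UD register, using
 * the trivial all-ones mask, which is only safe when the dispatch mask is
 * known to be tightly packed (see the comments above):
 *
 *    struct brw_reg tmp = retype(brw_vec1_grf(10, 0), BRW_REGISTER_TYPE_UD);
 *    brw_find_live_channel(p, tmp, brw_imm_ud(0xffffffff));
 */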

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address. The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset. The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address. Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
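         /* For example (illustrative): for a 4-byte component with a
          * horizontal stride encoding of 1 (a stride of one element) the
          * shift below is log2(4) + 1 - 1 = 2, i.e. the index is multiplied
          * by the 4-byte element size.
          */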
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, account for the difference if the source
          * register is above this limit.
          */
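         /* Worked example (hypothetical values): with src in r70 the offset
          * is 70 * 32 = 2240 bytes, so we ADD 2240 - 2240 % 512 = 2048 into
          * the address register and keep offset = 192 for the addressing
          * immediate.
          */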
         if (offset >= limit) {
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation is
             *    integer DWord multiply, indirect addressing must not be
             *    used."
             *
             * To work around this restriction we do two integer MOVs instead
             * of one 64-bit MOV. Because no double value should ever cross a
             * register boundary, it's safe to use the immediate offset in
             * the indirect here to handle adding 4 bytes to the offset and
             * avoid the extra ADD to the register file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_vec1_indirect(addr.subnr, offset),
                           BRW_REGISTER_TYPE_D));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_vec1_indirect(addr.subnr, offset + 4),
                           BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can be either zero or one. Replicate it
          * to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}
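
/* Usage sketch (hypothetical caller; register numbers are illustrative):
 * broadcast the UD component of r20 selected by the dynamic index in r30.0
 * to a scalar destination, assuming align1 mode is the current default:
 *
 *    brw_broadcast(p, retype(brw_vec1_grf(40, 0), BRW_REGISTER_TYPE_UD),
 *                  retype(brw_vec8_grf(20, 0), BRW_REGISTER_TYPE_UD),
 *                  retype(brw_vec1_grf(30, 0), BRW_REGISTER_TYPE_UD));
 */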

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we just want to
 * write one u32. So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_desc(p, send, (brw_message_desc(devinfo, 2, 0, false) |
                          brw_dp_untyped_atomic_desc(devinfo, 1, BRW_AOP_ADD,
                                                     false)));

   brw_inst_set_sfid(devinfo, send, sfid);
   brw_inst_set_binding_table_index(devinfo, send, surf_index);

   brw_pop_insn_state(p);
}
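
/* Usage sketch (hypothetical caller; register number and binding table index
 * are illustrative): emit the atomic add against the shader-time buffer
 * bound at binding table index 0, with the two-register payload (offset and
 * delta) already assembled starting at r80:
 *
 *    brw_shader_time_add(p, brw_vec8_grf(80, 0), 0);
 */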


/**
 * Emit the SEND message for a barrier
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());
   brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}
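
/* Usage sketch (hypothetical caller; register number is illustrative):
 * signal the barrier with a one-register message payload in r60 and then
 * stall until the notification for the barrier arrives:
 *
 *    brw_barrier(p, brw_vec8_grf(60, 0));
 *    brw_WAIT(p);
 */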


/**
 * Emit the wait instruction for a barrier
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

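   /* WAIT stalls the thread until the notification counter becomes non-zero
    * and then decrements it, which is why the notification register is used
    * as both source and destination below.
    */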
   struct brw_reg src = brw_notification_reg();

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}

/**
 * Changes the floating-point rounding mode by updating the control register
 * field defined at the cr0.0[5-6] bits. This function supports switching
 * among RTNE (00), RU (01), RD (10) and RTZ (11) rounding using bitwise
 * operations. Only RTNE and RTZ rounding are enabled at the NIR level.
 */
void
brw_rounding_mode(struct brw_codegen *p,
                  enum brw_rnd_mode mode)
{
   const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;

   if (bits != BRW_CR0_RND_MODE_MASK) {
      brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                               brw_imm_ud(~BRW_CR0_RND_MODE_MASK));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);

      /* From the Skylake PRM, Volume 7, page 760:
       *
       *    "Implementation Restriction on Register Access: When the control
       *    register is used as an explicit source and/or destination,
       *    hardware does not ensure execution pipeline coherency. Software
       *    must set the thread control field to 'switch' for an instruction
       *    that uses control register as an explicit operand."
       */
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }

   if (bits) {
      brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                              brw_imm_ud(bits));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }
}
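
/* Worked example (illustrative): switching to RTZ (11) gives bits ==
 * BRW_CR0_RND_MODE_MASK, so the AND above is skipped and the OR alone sets
 * both mode bits; switching to RTNE (00) gives bits == 0, so only the AND is
 * emitted to clear the field. The other modes need both instructions.
 */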