intel/eu: Use descriptor constructors for dataport read messages.
[mesa.git] / src / intel / compiler / brw_eu_emit.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
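
/* A typical caller (illustrative sketch; the real senders live in the code
 * generators) resolves the implied move before filling out the SEND:
 *
 *    gen6_resolve_implied_move(p, &payload, msg_reg_nr);
 *    brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
 *    brw_set_dest(p, send, dest);
 *    brw_set_src0(p, send, payload);
 *    ... set the message descriptor ...
 *
 * where "payload", "dest" and "msg_reg_nr" are hypothetical locals of the
 * caller.
 */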

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
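
/* For example, with this hack a write to m4 on gen7+ actually lands in
 * g(GEN7_MRF_HACK_START + 4), so the MRF-based code paths keep working on
 * hardware that no longer has message registers.
 */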

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          * Although Dst.HorzStride is a don't care for Align16, HW needs
          * this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Even though hstride is ignored in da16 mode, it still needs to
          * be programmed as '01'.
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * In platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16. In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be immediate in two-argument instructions. */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Specify the descriptor and extended descriptor immediate for a SEND(C)
 * message instruction.
 */
void
brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
                unsigned desc, unsigned ex_desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst_set_src1_file_type(devinfo, inst,
                               BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_D);
   brw_inst_set_send_desc(devinfo, inst, desc);
   if (devinfo->gen >= 9 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC))
      brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
}
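
/* Callers normally reach this through brw_set_desc() (which passes an
 * extended descriptor of 0) together with one of the descriptor
 * constructors, e.g. (illustrative; "send", "mlen" and "rlen" stand for
 * the caller's instruction and message/response lengths):
 *
 *    brw_set_desc(p, send, brw_message_desc(devinfo, mlen, rlen, true));
 */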

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}
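
/* For example, BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER consumes
 * two source operands (msg_length = 2) and returns both results
 * (response_length = 2), while a unary function such as
 * BRW_MATH_FUNCTION_INV is one register in, one register out.
 */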


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1);   /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

void
brw_set_dp_write_message(struct brw_codegen *p,
                         brw_inst *insn,
                         unsigned binding_table_index,
                         unsigned msg_control,
                         unsigned msg_type,
                         unsigned target_cache,
                         unsigned msg_length,
                         bool header_present,
                         unsigned last_render_target,
                         unsigned response_length,
                         unsigned end_of_thread,
                         unsigned send_commit_msg)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_WRITE);

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, header_present));

   brw_inst_set_sfid(devinfo, insn, sfid);
   brw_inst_set_eot(devinfo, insn, !!end_of_thread);
   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_write_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_write_msg_control(devinfo, insn, msg_control);
   brw_inst_set_rt_last(devinfo, insn, last_render_target);
   if (devinfo->gen < 7) {
      brw_inst_set_dp_write_commit(devinfo, insn, send_commit_msg);
   }

   if (devinfo->gen >= 11)
      brw_inst_set_null_rt(devinfo, insn, false);
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, mlen, rlen, header_present));

   brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
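
/* Note the block-size encoding differs between generations: a 4-register
 * scratch block is encoded as _mesa_logbase2(4) = 2 on gen8+, but as
 * num_regs - 1 = 3 on gen7.
 */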

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
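
/* E.g. a byte subnr of 16 (the fifth dword of the register) is encoded as
 * component 4.
 */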

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.nr < 128);
   assert(src1.nr < 128);
   assert(src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD);
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types.  The MAD and LRP emitters ensure
          * that all four types are float.  The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0)                     \
{                                                           \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);         \
}

#define ALU2(OP)                                                  \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1)                           \
{                                                                 \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);         \
}

#define ALU3(OP)                                                        \
brw_inst *brw_##OP(struct brw_codegen *p,                               \
                   struct brw_reg dest,                                 \
                   struct brw_reg src0,                                 \
                   struct brw_reg src1,                                 \
                   struct brw_reg src2)                                 \
{                                                                       \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);         \
}

#define ALU3F(OP)                                               \
brw_inst *brw_##OP(struct brw_codegen *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                   \
          dest.type == BRW_REGISTER_TYPE_DF);                   \
   if (dest.type == BRW_REGISTER_TYPE_F) {                      \
      assert(src0.type == BRW_REGISTER_TYPE_F);                 \
      assert(src1.type == BRW_REGISTER_TYPE_F);                 \
      assert(src2.type == BRW_REGISTER_TYPE_F);                 \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {              \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                \
   }                                                            \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                  \
void brw_##OP(struct brw_codegen *p,                               \
              struct brw_reg dest,                                 \
              struct brw_reg src)                                  \
{                                                                  \
   const struct gen_device_info *devinfo = p->devinfo;             \
   brw_inst *rnd, *add;                                            \
   rnd = next_insn(p, BRW_OPCODE_##OP);                            \
   brw_set_dest(p, rnd, dest);                                     \
   brw_set_src0(p, rnd, src);                                      \
                                                                   \
   if (devinfo->gen < 6) {                                         \
      /* turn on round-increments */                               \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R); \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));               \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
   }                                                               \
}
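
/* On gen4/5 the sequence emitted for e.g. brw_RNDZ(p, dst, src) is thus,
 * schematically:
 *
 *    rndz.r  dst, src              // also sets per-channel increment bits
 *    (+f0)   add  dst, dst, 1.0F   // fix up the channels that need it
 */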

ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
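
/* E.g. a source that would normally be read as g1<4,4,1>:F is rewritten to
 * g1<1,2,0>:F, so each element is fetched twice and the ignored odd
 * channels still see valid data ("g1" here is illustrative).
 */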

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}
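
/* With a UD destination in Align1 on gen8+, this emits, schematically
 * (register numbers illustrative):
 *
 *    mov (8)  g4<2>:hf   g2<8,8,1>:f  { NoDDClr }
 *    mov (8)  g4.1<2>:w  0x0000:w     { NoDDChk }
 *
 * i.e. the half-float result lands in the low word of each dword and the
 * high word is zero-filled.
 */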

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   Because this instruction does not have a 16-bit floating-point
       *   type, the source data type must be Word (W). The destination type
       *   must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}


/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* The EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, e.g. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction: */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}
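
/* A typical structured-control-flow sequence from one of the generators
 * looks like (illustrative sketch):
 *
 *    brw_IF(p, BRW_EXECUTE_8);
 *    ... emit then-block instructions ...
 *    brw_ELSE(p);
 *    ... emit else-block instructions ...
 *    brw_ENDIF(p);
 *
 * brw_ENDIF() pops the entries pushed here and patches the IF/ELSE jump
 * targets.
 */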

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP.  On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /* A single call to next_insn() may change the base address of the
    * instruction store (p->store), so call it first, before computing any
    * pointers into the store from saved indices.
    */
   if (emit_endif)
      insn = next_insn(p, BRW_OPCODE_ENDIF);

   /* Pop the IF and (optional) ELSE instructions from the stack */
   p->if_depth_in_loop[p->loop_stack_depth]--;
   tmp = pop_if_stack(p);
   if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
      else_inst = tmp;
      tmp = pop_if_stack(p);
   }
   if_inst = tmp;

   if (!emit_endif) {
      /* ENDIF is useless; don't bother emitting it. */
      convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
      return;
   }

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
   } else {
      brw_set_src0(p, insn, brw_imm_d(0));
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   /* Also pop item off the stack in the endif instruction: */
   if (devinfo->gen < 6) {
      brw_inst_set_gen4_jump_count(devinfo, insn, 0);
      brw_inst_set_gen4_pop_count(devinfo, insn, 1);
   } else if (devinfo->gen == 6) {
      brw_inst_set_gen6_jump_count(devinfo, insn, 2);
   } else {
      brw_inst_set_jip(devinfo, insn, 2);
   }
   patch_IF_ELSE(p, if_inst, else_inst, insn);
}

brw_inst *
brw_BREAK(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_BREAK);
   if (devinfo->gen >= 8) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen >= 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));

   return insn;
}
1596
1597 brw_inst *
1598 brw_CONT(struct brw_codegen *p)
1599 {
1600 const struct gen_device_info *devinfo = p->devinfo;
1601 brw_inst *insn;
1602
1603 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1604 brw_set_dest(p, insn, brw_ip_reg());
1605 if (devinfo->gen >= 8) {
1606 brw_set_src0(p, insn, brw_imm_d(0x0));
1607 } else {
1608 brw_set_src0(p, insn, brw_ip_reg());
1609 brw_set_src1(p, insn, brw_imm_d(0x0));
1610 }
1611
1612 if (devinfo->gen < 6) {
1613 brw_inst_set_gen4_pop_count(devinfo, insn,
1614 p->if_depth_in_loop[p->loop_stack_depth]);
1615 }
1616 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1617 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1618 return insn;
1619 }
1620
1621 brw_inst *
1622 gen6_HALT(struct brw_codegen *p)
1623 {
1624 const struct gen_device_info *devinfo = p->devinfo;
1625 brw_inst *insn;
1626
1627 insn = next_insn(p, BRW_OPCODE_HALT);
1628 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1629 if (devinfo->gen >= 8) {
1630 brw_set_src0(p, insn, brw_imm_d(0x0));
1631 } else {
1632 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1633 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1634 }
1635
1636 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1637 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1638 return insn;
1639 }
1640
1641 /* DO/WHILE loop:
1642 *
1643  * The DO/WHILE pair is just an unterminated loop -- BREAK or CONTINUE
1644  * instructions provide flow control within the loop.  There are a few
1645  * ways it can be implemented.
1646  *
1647  * For uniform control flow, the WHILE is just a jump, so we emit an
1648  * ADD ip, ip, jip and no DO instruction.
1649 *
1650 * For non-uniform control flow pre-gen6, there's a DO instruction to
1651 * push the mask, and a WHILE to jump back, and BREAK to get out and
1652 * pop the mask.
1653 *
1654 * For gen6, there's no more mask stack, so no need for DO. WHILE
1655 * just points back to the first instruction of the loop.
1656 */
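/* Usage sketch (illustrative only): a complete loop as a caller would emit
 * it, assuming an initialized codegen context "p":
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *    ... loop body, typically with a predicated brw_BREAK(p) or
 *        brw_CONT(p) providing the exit condition ...
 *    brw_WHILE(p);
 *
 * On Gen6 and later brw_DO() emits no instruction at all; it merely records
 * the loop start for brw_WHILE() to jump back to.
 */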
1657 brw_inst *
1658 brw_DO(struct brw_codegen *p, unsigned execute_size)
1659 {
1660 const struct gen_device_info *devinfo = p->devinfo;
1661
1662 if (devinfo->gen >= 6 || p->single_program_flow) {
1663 push_loop_stack(p, &p->store[p->nr_insn]);
1664 return &p->store[p->nr_insn];
1665 } else {
1666 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1667
1668 push_loop_stack(p, insn);
1669
1670 /* Override the defaults for this instruction:
1671 */
1672 brw_set_dest(p, insn, brw_null_reg());
1673 brw_set_src0(p, insn, brw_null_reg());
1674 brw_set_src1(p, insn, brw_null_reg());
1675
1676 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1677 brw_inst_set_exec_size(devinfo, insn, execute_size);
1678 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1679
1680 return insn;
1681 }
1682 }
1683
1684 /**
1685 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1686 * instruction here.
1687 *
1688 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1689 * nesting, since it can always just point to the end of the block/current loop.
1690 */
1691 static void
1692 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1693 {
1694 const struct gen_device_info *devinfo = p->devinfo;
1695 brw_inst *do_inst = get_inner_do_insn(p);
1696 brw_inst *inst;
1697 unsigned br = brw_jump_scale(devinfo);
1698
1699 assert(devinfo->gen < 6);
1700
1701 for (inst = while_inst - 1; inst != do_inst; inst--) {
1702          /* A nonzero jump count means that this instruction has already
1703           * been patched, because it belongs to a loop nested inside the
1704           * one we're patching.
1705           */
1706 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1707 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1708 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1709 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1710 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1711 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1712 }
1713 }
1714 }
1715
1716 brw_inst *
1717 brw_WHILE(struct brw_codegen *p)
1718 {
1719 const struct gen_device_info *devinfo = p->devinfo;
1720 brw_inst *insn, *do_insn;
1721 unsigned br = brw_jump_scale(devinfo);
1722
1723 if (devinfo->gen >= 6) {
1724 insn = next_insn(p, BRW_OPCODE_WHILE);
1725 do_insn = get_inner_do_insn(p);
1726
1727 if (devinfo->gen >= 8) {
1728 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1729 brw_set_src0(p, insn, brw_imm_d(0));
1730 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1731 } else if (devinfo->gen == 7) {
1732 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1733 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1734 brw_set_src1(p, insn, brw_imm_w(0));
1735 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1736 } else {
1737 brw_set_dest(p, insn, brw_imm_w(0));
1738 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1739 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1740 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1741 }
1742
1743 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1744
1745 } else {
1746 if (p->single_program_flow) {
1747 insn = next_insn(p, BRW_OPCODE_ADD);
1748 do_insn = get_inner_do_insn(p);
1749
1750 brw_set_dest(p, insn, brw_ip_reg());
1751 brw_set_src0(p, insn, brw_ip_reg());
1752 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1753 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1754 } else {
1755 insn = next_insn(p, BRW_OPCODE_WHILE);
1756 do_insn = get_inner_do_insn(p);
1757
1758 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1759
1760 brw_set_dest(p, insn, brw_ip_reg());
1761 brw_set_src0(p, insn, brw_ip_reg());
1762 brw_set_src1(p, insn, brw_imm_d(0));
1763
1764 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1765 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1766 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1767
1768 brw_patch_break_cont(p, insn);
1769 }
1770 }
1771 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1772
1773 p->loop_stack_depth--;
1774
1775 return insn;
1776 }
1777
1778 /* FORWARD JUMPS:
1779 */
1780 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1781 {
1782 const struct gen_device_info *devinfo = p->devinfo;
1783 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1784 unsigned jmpi = 1;
1785
1786 if (devinfo->gen >= 5)
1787 jmpi = 2;
1788
1789 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1790 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1791
1792 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1793 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1794 }
1795
1796 /* To integrate with the above, it makes sense that the comparison
1797 * instruction should populate the flag register. It might be simpler
1798 * just to use the flag reg for most WM tasks?
1799 */
1800 void brw_CMP(struct brw_codegen *p,
1801 struct brw_reg dest,
1802 unsigned conditional,
1803 struct brw_reg src0,
1804 struct brw_reg src1)
1805 {
1806 const struct gen_device_info *devinfo = p->devinfo;
1807 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1808
1809 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1810 brw_set_dest(p, insn, dest);
1811 brw_set_src0(p, insn, src0);
1812 brw_set_src1(p, insn, src1);
1813
1814 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1815 * page says:
1816 * "Any CMP instruction with a null destination must use a {switch}."
1817 *
1818 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1819 * mentioned on their work-arounds pages.
1820 */
1821 if (devinfo->gen == 7) {
1822 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1823 dest.nr == BRW_ARF_NULL) {
1824 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1825 }
1826 }
1827 }
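
/* Usage sketch (illustrative only): populate the flag register with a
 * greater-or-equal comparison that can predicate a following instruction.
 * The null destination is retyped to match the source type, as callers in
 * this driver generally do; src0 and src1 are hypothetical float GRFs:
 *
 *    brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_F),
 *            BRW_CONDITIONAL_GE, src0, src1);
 */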
1828
1829 /***********************************************************************
1830 * Helpers for the various SEND message types:
1831 */
1832
1833 /** Extended math function, float[8].
1834 */
1835 void gen4_math(struct brw_codegen *p,
1836 struct brw_reg dest,
1837 unsigned function,
1838 unsigned msg_reg_nr,
1839 struct brw_reg src,
1840 unsigned precision )
1841 {
1842 const struct gen_device_info *devinfo = p->devinfo;
1843 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1844 unsigned data_type;
1845 if (has_scalar_region(src)) {
1846 data_type = BRW_MATH_DATA_SCALAR;
1847 } else {
1848 data_type = BRW_MATH_DATA_VECTOR;
1849 }
1850
1851 assert(devinfo->gen < 6);
1852
1853 /* Example code doesn't set predicate_control for send
1854 * instructions.
1855 */
1856 brw_inst_set_pred_control(devinfo, insn, 0);
1857 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1858
1859 brw_set_dest(p, insn, dest);
1860 brw_set_src0(p, insn, src);
1861 brw_set_math_message(p,
1862 insn,
1863 function,
1864 src.type == BRW_REGISTER_TYPE_D,
1865 precision,
1866 data_type);
1867 }
1868
1869 void gen6_math(struct brw_codegen *p,
1870 struct brw_reg dest,
1871 unsigned function,
1872 struct brw_reg src0,
1873 struct brw_reg src1)
1874 {
1875 const struct gen_device_info *devinfo = p->devinfo;
1876 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1877
1878 assert(devinfo->gen >= 6);
1879
1880 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1881 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1882
1883 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1884 if (devinfo->gen == 6) {
1885 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1886 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1887 }
1888
1889 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1890 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1891 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1892 assert(src0.type != BRW_REGISTER_TYPE_F);
1893 assert(src1.type != BRW_REGISTER_TYPE_F);
1894 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1895 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1896 } else {
1897 assert(src0.type == BRW_REGISTER_TYPE_F);
1898 assert(src1.type == BRW_REGISTER_TYPE_F);
1899 }
1900
1901 /* Source modifiers are ignored for extended math instructions on Gen6. */
1902 if (devinfo->gen == 6) {
1903 assert(!src0.negate);
1904 assert(!src0.abs);
1905 assert(!src1.negate);
1906 assert(!src1.abs);
1907 }
1908
1909 brw_inst_set_math_function(devinfo, insn, function);
1910
1911 brw_set_dest(p, insn, dest);
1912 brw_set_src0(p, insn, src0);
1913 brw_set_src1(p, insn, src1);
1914 }
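
/* Usage sketch (illustrative only): a one-source extended math operation.
 * Unused sources are conventionally passed as the null register, which has
 * float type and therefore satisfies the type asserts above; dst and src
 * are hypothetical float GRFs:
 *
 *    gen6_math(p, dst, BRW_MATH_FUNCTION_INV, src, brw_null_reg());
 */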
1915
1916 /**
1917 * Return the right surface index to access the thread scratch space using
1918 * stateless dataport messages.
1919 */
1920 unsigned
1921 brw_scratch_surface_idx(const struct brw_codegen *p)
1922 {
1923 /* The scratch space is thread-local so IA coherency is unnecessary. */
1924 if (p->devinfo->gen >= 8)
1925 return GEN8_BTI_STATELESS_NON_COHERENT;
1926 else
1927 return BRW_BTI_STATELESS;
1928 }
1929
1930 /**
1931  * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1932 * using a constant offset per channel.
1933 *
1934 * The offset must be aligned to oword size (16 bytes). Used for
1935 * register spilling.
1936 */
1937 void brw_oword_block_write_scratch(struct brw_codegen *p,
1938 struct brw_reg mrf,
1939 int num_regs,
1940 unsigned offset)
1941 {
1942 const struct gen_device_info *devinfo = p->devinfo;
1943 const unsigned target_cache =
1944 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1945 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1946 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
1947 uint32_t msg_type;
1948
1949 if (devinfo->gen >= 6)
1950 offset /= 16;
1951
1952 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1953
1954 const unsigned mlen = 1 + num_regs;
1955
1956 /* Set up the message header. This is g0, with g0.2 filled with
1957 * the offset. We don't want to leave our offset around in g0 or
1958 * it'll screw up texture samples, so set it up inside the message
1959 * reg.
1960 */
1961 {
1962 brw_push_insn_state(p);
1963 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1964 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1965 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1966
1967 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1968
1969 /* set message header global offset field (reg 0, element 2) */
1970 brw_set_default_exec_size(p, BRW_EXECUTE_1);
1971 brw_MOV(p,
1972 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
1973 mrf.nr,
1974 2), BRW_REGISTER_TYPE_UD),
1975 brw_imm_ud(offset));
1976
1977 brw_pop_insn_state(p);
1978 }
1979
1980 {
1981 struct brw_reg dest;
1982 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1983 int send_commit_msg;
1984 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
1985 BRW_REGISTER_TYPE_UW);
1986
1987 brw_inst_set_compression(devinfo, insn, false);
1988
1989 if (brw_inst_exec_size(devinfo, insn) >= 16)
1990 src_header = vec16(src_header);
1991
1992 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1993 if (devinfo->gen < 6)
1994 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
1995
1996 /* Until gen6, writes followed by reads from the same location
1997 * are not guaranteed to be ordered unless write_commit is set.
1998 * If set, then a no-op write is issued to the destination
1999 * register to set a dependency, and a read from the destination
2000 * can be used to ensure the ordering.
2001 *
2002 * For gen6, only writes between different threads need ordering
2003 * protection. Our use of DP writes is all about register
2004 * spilling within a thread.
2005 */
2006 if (devinfo->gen >= 6) {
2007 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2008 send_commit_msg = 0;
2009 } else {
2010 dest = src_header;
2011 send_commit_msg = 1;
2012 }
2013
2014 brw_set_dest(p, insn, dest);
2015 if (devinfo->gen >= 6) {
2016 brw_set_src0(p, insn, mrf);
2017 } else {
2018 brw_set_src0(p, insn, brw_null_reg());
2019 }
2020
2021 if (devinfo->gen >= 6)
2022 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2023 else
2024 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2025
2026 brw_set_dp_write_message(p,
2027 insn,
2028 brw_scratch_surface_idx(p),
2029 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2030 msg_type,
2031 target_cache,
2032 mlen,
2033 true, /* header_present */
2034 0, /* not a render target */
2035 send_commit_msg, /* response_length */
2036 0, /* eot */
2037 send_commit_msg);
2038 }
2039 }
2040
2041
2042 /**
2043 * Read a block of owords (half a GRF each) from the scratch buffer
2044 * using a constant index per channel.
2045 *
2046 * Offset must be aligned to oword size (16 bytes). Used for register
2047 * spilling.
2048 */
2049 void
2050 brw_oword_block_read_scratch(struct brw_codegen *p,
2051 struct brw_reg dest,
2052 struct brw_reg mrf,
2053 int num_regs,
2054 unsigned offset)
2055 {
2056 const struct gen_device_info *devinfo = p->devinfo;
2057
2058 if (devinfo->gen >= 6)
2059 offset /= 16;
2060
2061 if (p->devinfo->gen >= 7) {
2062 /* On gen 7 and above, we no longer have message registers and we can
2063 * send from any register we want. By using the destination register
2064 * for the message, we guarantee that the implied message write won't
2065 * accidentally overwrite anything. This has been a problem because
2066 * the MRF registers and source for the final FB write are both fixed
2067 * and may overlap.
2068 */
2069 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2070 } else {
2071 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2072 }
2073 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2074
2075 const unsigned rlen = num_regs;
2076 const unsigned target_cache =
2077 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2078 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2079 BRW_SFID_DATAPORT_READ);
2080
2081 {
2082 brw_push_insn_state(p);
2083 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2084 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2085 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2086
2087 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2088
2089 /* set message header global offset field (reg 0, element 2) */
2090 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2091 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2092
2093 brw_pop_insn_state(p);
2094 }
2095
2096 {
2097 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2098
2099 brw_inst_set_sfid(devinfo, insn, target_cache);
2100 assert(brw_inst_pred_control(devinfo, insn) == 0);
2101 brw_inst_set_compression(devinfo, insn, false);
2102
2103 brw_set_dest(p, insn, dest); /* UW? */
2104 if (devinfo->gen >= 6) {
2105 brw_set_src0(p, insn, mrf);
2106 } else {
2107 brw_set_src0(p, insn, brw_null_reg());
2108 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2109 }
2110
2111 brw_set_desc(p, insn,
2112 brw_message_desc(devinfo, 1, rlen, true) |
2113 brw_dp_read_desc(devinfo, brw_scratch_surface_idx(p),
2114 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2115 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2116 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2117 }
2118 }
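
/* Usage sketch (illustrative only): spilling two GRFs to scratch and later
 * filling them back, using m10 as the message payload and a hypothetical
 * byte offset of 64 (the offset must be a multiple of the 16-byte oword):
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(10), 2, 64);
 *    ...
 *    brw_oword_block_read_scratch(p, dest, brw_message_reg(10), 2, 64);
 */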
2119
2120 void
2121 gen7_block_read_scratch(struct brw_codegen *p,
2122 struct brw_reg dest,
2123 int num_regs,
2124 unsigned offset)
2125 {
2126 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2127 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2128
2129 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2130
2131 /* The HW requires that the header is present; this is to get the g0.5
2132 * scratch offset.
2133 */
2134 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2135
2136 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2137 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2138 * is 32 bytes, which happens to be the size of a register.
2139 */
2140 offset /= REG_SIZE;
2141 assert(offset < (1 << 12));
2142
2143 gen7_set_dp_scratch_message(p, insn,
2144 false, /* scratch read */
2145 false, /* OWords */
2146 false, /* invalidate after read */
2147 num_regs,
2148 offset,
2149 1, /* mlen: just g0 */
2150 num_regs, /* rlen */
2151 true); /* header present */
2152 }
2153
2154 /**
2155 * Read float[4] vectors from the data port constant cache.
2156 * Location (in buffer) should be a multiple of 16.
2157 * Used for fetching shader constants.
2158 */
2159 void brw_oword_block_read(struct brw_codegen *p,
2160 struct brw_reg dest,
2161 struct brw_reg mrf,
2162 uint32_t offset,
2163 uint32_t bind_table_index)
2164 {
2165 const struct gen_device_info *devinfo = p->devinfo;
2166 const unsigned target_cache =
2167 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2168 BRW_SFID_DATAPORT_READ);
2169 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2170
2171    /* On Gen6 and later, the offset is in units of owords. */
2172 if (devinfo->gen >= 6)
2173 offset /= 16;
2174
2175 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2176
2177 brw_push_insn_state(p);
2178 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2179 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2180 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2181
2182 brw_push_insn_state(p);
2183 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2184 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2185
2186 /* set message header global offset field (reg 0, element 2) */
2187 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2188 brw_MOV(p,
2189 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2190 mrf.nr,
2191 2), BRW_REGISTER_TYPE_UD),
2192 brw_imm_ud(offset));
2193 brw_pop_insn_state(p);
2194
2195 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2196
2197 brw_inst_set_sfid(devinfo, insn, target_cache);
2198
2199 /* cast dest to a uword[8] vector */
2200 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2201
2202 brw_set_dest(p, insn, dest);
2203 if (devinfo->gen >= 6) {
2204 brw_set_src0(p, insn, mrf);
2205 } else {
2206 brw_set_src0(p, insn, brw_null_reg());
2207 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2208 }
2209
2210 brw_set_desc(p, insn,
2211 brw_message_desc(devinfo, 1, DIV_ROUND_UP(exec_size, 8), true) |
2212 brw_dp_read_desc(devinfo, bind_table_index,
2213 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2214 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2215 BRW_DATAPORT_READ_TARGET_DATA_CACHE));
2216
2217 brw_pop_insn_state(p);
2218 }
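
/* Usage sketch (illustrative only): pulling a block of shader constants
 * through m1, where 16 is the byte offset into the buffer and surf_index
 * a hypothetical binding-table entry:
 *
 *    brw_oword_block_read(p, dest, brw_message_reg(1), 16, surf_index);
 */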
2219
2220 brw_inst *
2221 brw_fb_WRITE(struct brw_codegen *p,
2222 struct brw_reg payload,
2223 struct brw_reg implied_header,
2224 unsigned msg_control,
2225 unsigned binding_table_index,
2226 unsigned msg_length,
2227 unsigned response_length,
2228 bool eot,
2229 bool last_render_target,
2230 bool header_present)
2231 {
2232 const struct gen_device_info *devinfo = p->devinfo;
2233 const unsigned target_cache =
2234 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2235 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2236 brw_inst *insn;
2237 unsigned msg_type;
2238 struct brw_reg dest, src0;
2239
2240 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2241 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2242 else
2243 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2244
2245 if (devinfo->gen >= 6) {
2246 insn = next_insn(p, BRW_OPCODE_SENDC);
2247 } else {
2248 insn = next_insn(p, BRW_OPCODE_SEND);
2249 }
2250 brw_inst_set_compression(devinfo, insn, false);
2251
2252 if (devinfo->gen >= 6) {
2253 /* headerless version, just submit color payload */
2254 src0 = payload;
2255
2256 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2257 } else {
2258 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2259 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2260 src0 = implied_header;
2261
2262 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2263 }
2264
2265 brw_set_dest(p, insn, dest);
2266 brw_set_src0(p, insn, src0);
2267 brw_set_dp_write_message(p,
2268 insn,
2269 binding_table_index,
2270 msg_control,
2271 msg_type,
2272 target_cache,
2273 msg_length,
2274 header_present,
2275 last_render_target,
2276 response_length,
2277 eot,
2278 0 /* send_commit_msg */);
2279
2280 return insn;
2281 }
2282
2283 brw_inst *
2284 gen9_fb_READ(struct brw_codegen *p,
2285 struct brw_reg dst,
2286 struct brw_reg payload,
2287 unsigned binding_table_index,
2288 unsigned msg_length,
2289 unsigned response_length,
2290 bool per_sample)
2291 {
2292 const struct gen_device_info *devinfo = p->devinfo;
2293 assert(devinfo->gen >= 9);
2294 const unsigned msg_subtype =
2295 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2296 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2297
2298 brw_inst_set_sfid(devinfo, insn, GEN6_SFID_DATAPORT_RENDER_CACHE);
2299 brw_set_dest(p, insn, dst);
2300 brw_set_src0(p, insn, payload);
2301 brw_set_desc(
2302 p, insn,
2303 brw_message_desc(devinfo, msg_length, response_length, true) |
2304 brw_dp_read_desc(devinfo, binding_table_index,
2305 per_sample << 5 | msg_subtype,
2306 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2307 BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
2308 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2309
2310 return insn;
2311 }
2312
2313 /**
2314 * Texture sample instruction.
2315 * Note: the msg_type plus msg_length values determine exactly what kind
2316 * of sampling operation is performed. See volume 4, page 161 of docs.
2317 */
2318 void brw_SAMPLE(struct brw_codegen *p,
2319 struct brw_reg dest,
2320 unsigned msg_reg_nr,
2321 struct brw_reg src0,
2322 unsigned binding_table_index,
2323 unsigned sampler,
2324 unsigned msg_type,
2325 unsigned response_length,
2326 unsigned msg_length,
2327 unsigned header_present,
2328 unsigned simd_mode,
2329 unsigned return_format)
2330 {
2331 const struct gen_device_info *devinfo = p->devinfo;
2332 brw_inst *insn;
2333
2334 if (msg_reg_nr != -1)
2335 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2336
2337 insn = next_insn(p, BRW_OPCODE_SEND);
2338 brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
2339 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2340
2341 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2342 *
2343 * "Instruction compression is not allowed for this instruction (that
2344 * is, send). The hardware behavior is undefined if this instruction is
2345 * set as compressed. However, compress control can be set to "SecHalf"
2346 * to affect the EMask generation."
2347 *
2348 * No similar wording is found in later PRMs, but there are examples
2349 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2350 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2351 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2352 */
2353 brw_inst_set_compression(devinfo, insn, false);
2354
2355 if (devinfo->gen < 6)
2356 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2357
2358 brw_set_dest(p, insn, dest);
2359 brw_set_src0(p, insn, src0);
2360 brw_set_desc(p, insn,
2361 brw_message_desc(devinfo, msg_length, response_length,
2362 header_present) |
2363 brw_sampler_desc(devinfo, binding_table_index, sampler,
2364 msg_type, simd_mode, return_format));
2365 }
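
/* Usage sketch (illustrative only): a basic SIMD8 sample with a header,
 * using hypothetical surface/sampler indices and payload sizes:
 *
 *    brw_SAMPLE(p, dest, 1, src0, surf_index, sampler_index,
 *               GEN5_SAMPLER_MESSAGE_SAMPLE, 4, 3, true,
 *               BRW_SAMPLER_SIMD_MODE_SIMD8,
 *               BRW_SAMPLER_RETURN_FORMAT_FLOAT32);
 *
 * where 4 is the response length in GRFs, 3 the message length, and "true"
 * indicates a header is present.
 */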
2366
2367 /* Adjust the message header's sampler state pointer to
2368 * select the correct group of 16 samplers.
2369 */
2370 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2371 struct brw_reg header,
2372 struct brw_reg sampler_index)
2373 {
2374 /* The "Sampler Index" field can only store values between 0 and 15.
2375 * However, we can add an offset to the "Sampler State Pointer"
2376 * field, effectively selecting a different set of 16 samplers.
2377 *
2378 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2379 * offset, and each sampler state is only 16-bytes, so we can't
2380 * exclusively use the offset - we have to use both.
2381 */
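
   /* Worked example: for an immediate sampler_index of 20 the group is
    * 20 / 16 == 1, so 16 * 1 * 16 == 256 bytes are added to the Sampler
    * State Pointer below, leaving the low bits (20 % 16 == 4) for the
    * descriptor's 4-bit Sampler Index field.
    */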
2382
2383 const struct gen_device_info *devinfo = p->devinfo;
2384
2385 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2386 const int sampler_state_size = 16; /* 16 bytes */
2387 uint32_t sampler = sampler_index.ud;
2388
2389 if (sampler >= 16) {
2390 assert(devinfo->is_haswell || devinfo->gen >= 8);
2391 brw_ADD(p,
2392 get_element_ud(header, 3),
2393 get_element_ud(brw_vec8_grf(0, 0), 3),
2394 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2395 }
2396 } else {
2397 /* Non-const sampler array indexing case */
2398 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2399 return;
2400 }
2401
2402 struct brw_reg temp = get_element_ud(header, 3);
2403
2404 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2405 brw_SHL(p, temp, temp, brw_imm_ud(4));
2406 brw_ADD(p,
2407 get_element_ud(header, 3),
2408 get_element_ud(brw_vec8_grf(0, 0), 3),
2409 temp);
2410 }
2411 }
2412
2413 /* All these variables are pretty confusing - we might be better off
2414 * using bitmasks and macros for this, in the old style. Or perhaps
2415 * just having the caller instantiate the fields in dword3 itself.
2416 */
2417 void brw_urb_WRITE(struct brw_codegen *p,
2418 struct brw_reg dest,
2419 unsigned msg_reg_nr,
2420 struct brw_reg src0,
2421 enum brw_urb_write_flags flags,
2422 unsigned msg_length,
2423 unsigned response_length,
2424 unsigned offset,
2425 unsigned swizzle)
2426 {
2427 const struct gen_device_info *devinfo = p->devinfo;
2428 brw_inst *insn;
2429
2430 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2431
2432 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2433 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2434 brw_push_insn_state(p);
2435 brw_set_default_access_mode(p, BRW_ALIGN_1);
2436 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2437 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2438 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2439 BRW_REGISTER_TYPE_UD),
2440 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2441 brw_imm_ud(0xff00));
2442 brw_pop_insn_state(p);
2443 }
2444
2445 insn = next_insn(p, BRW_OPCODE_SEND);
2446
2447 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2448
2449 brw_set_dest(p, insn, dest);
2450 brw_set_src0(p, insn, src0);
2451 brw_set_src1(p, insn, brw_imm_d(0));
2452
2453 if (devinfo->gen < 6)
2454 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2455
2456 brw_set_urb_message(p,
2457 insn,
2458 flags,
2459 msg_length,
2460 response_length,
2461 offset,
2462 swizzle);
2463 }
2464
2465 struct brw_inst *
2466 brw_send_indirect_message(struct brw_codegen *p,
2467 unsigned sfid,
2468 struct brw_reg dst,
2469 struct brw_reg payload,
2470 struct brw_reg desc,
2471 unsigned desc_imm)
2472 {
2473 const struct gen_device_info *devinfo = p->devinfo;
2474 struct brw_inst *send;
2475 int setup;
2476
2477 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2478
2479 assert(desc.type == BRW_REGISTER_TYPE_UD);
2480
2481 /* We hold on to the setup instruction (the SEND in the direct case, the OR
2482 * in the indirect case) by its index in the instruction store. The
2483 * pointer returned by next_insn() may become invalid if emitting the SEND
2484 * in the indirect case reallocs the store.
2485 */
2486
2487 if (desc.file == BRW_IMMEDIATE_VALUE) {
2488 setup = p->nr_insn;
2489 send = next_insn(p, BRW_OPCODE_SEND);
2490 brw_set_desc(p, send, desc.ud | desc_imm);
2491
2492 } else {
2493 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2494
2495 brw_push_insn_state(p);
2496 brw_set_default_access_mode(p, BRW_ALIGN_1);
2497 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2498 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2499 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2500
2501 /* Load the indirect descriptor to an address register using OR so the
2502 * caller can specify additional descriptor bits with the usual
2503 * brw_set_*_message() helper functions.
2504 */
2505 setup = p->nr_insn;
2506 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2507
2508 brw_pop_insn_state(p);
2509
2510 send = next_insn(p, BRW_OPCODE_SEND);
2511 brw_set_src1(p, send, addr);
2512 }
2513
2514 if (dst.width < BRW_EXECUTE_8)
2515 brw_inst_set_exec_size(devinfo, send, dst.width);
2516
2517 brw_set_dest(p, send, dst);
2518 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2519 brw_inst_set_sfid(devinfo, send, sfid);
2520
2521 return &p->store[setup];
2522 }
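
/* Usage sketch (illustrative only): with an immediate descriptor this helper
 * degenerates to a plain SEND; with a register descriptor it first ORs the
 * descriptor into a0.0.  A hypothetical caller then fills in the common
 * descriptor fields on the returned setup instruction, mirroring what
 * brw_send_indirect_surface_message() does below:
 *
 *    brw_inst *send = brw_send_indirect_message(p, sfid, dst, payload,
 *                                               desc_reg, 0);
 *    brw_inst_set_mlen(p->devinfo, send, mlen);
 *    brw_inst_set_rlen(p->devinfo, send, rlen);
 */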
2523
2524 static struct brw_inst *
2525 brw_send_indirect_surface_message(struct brw_codegen *p,
2526 unsigned sfid,
2527 struct brw_reg dst,
2528 struct brw_reg payload,
2529 struct brw_reg surface,
2530 unsigned message_len,
2531 unsigned response_len,
2532 bool header_present)
2533 {
2534 const struct gen_device_info *devinfo = p->devinfo;
2535 struct brw_inst *insn;
2536
2537 if (surface.file != BRW_IMMEDIATE_VALUE) {
2538 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2539
2540 brw_push_insn_state(p);
2541 brw_set_default_access_mode(p, BRW_ALIGN_1);
2542 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2543 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2544 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2545
2546 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2547 * some surface array is accessed out of bounds.
2548 */
2549 insn = brw_AND(p, addr,
2550 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2551 BRW_GET_SWZ(surface.swizzle, 0)),
2552 brw_imm_ud(0xff));
2553
2554 brw_pop_insn_state(p);
2555
2556 surface = addr;
2557 }
2558
2559 insn = brw_send_indirect_message(p, sfid, dst, payload, surface, 0);
2560 brw_inst_set_mlen(devinfo, insn, message_len);
2561 brw_inst_set_rlen(devinfo, insn, response_len);
2562 brw_inst_set_header_present(devinfo, insn, header_present);
2563
2564 return insn;
2565 }
2566
2567 static bool
2568 while_jumps_before_offset(const struct gen_device_info *devinfo,
2569 brw_inst *insn, int while_offset, int start_offset)
2570 {
2571 int scale = 16 / brw_jump_scale(devinfo);
2572 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2573 : brw_inst_jip(devinfo, insn);
2574 assert(jip < 0);
2575 return while_offset + jip * scale <= start_offset;
2576 }
2577
2578
2579 static int
2580 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2581 {
2582 int offset;
2583 void *store = p->store;
2584 const struct gen_device_info *devinfo = p->devinfo;
2585
2586 int depth = 0;
2587
2588 for (offset = next_offset(devinfo, store, start_offset);
2589 offset < p->next_insn_offset;
2590 offset = next_offset(devinfo, store, offset)) {
2591 brw_inst *insn = store + offset;
2592
2593 switch (brw_inst_opcode(devinfo, insn)) {
2594 case BRW_OPCODE_IF:
2595 depth++;
2596 break;
2597 case BRW_OPCODE_ENDIF:
2598 if (depth == 0)
2599 return offset;
2600 depth--;
2601 break;
2602 case BRW_OPCODE_WHILE:
2603 /* If the while doesn't jump before our instruction, it's the end
2604 * of a sibling do...while loop. Ignore it.
2605 */
2606 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2607 continue;
2608 /* fallthrough */
2609 case BRW_OPCODE_ELSE:
2610 case BRW_OPCODE_HALT:
2611 if (depth == 0)
2612 return offset;
2613 }
2614 }
2615
2616 return 0;
2617 }
2618
2619 /* There is no DO instruction on gen6, so to find the end of the loop
2620 * we have to see if the loop is jumping back before our start
2621 * instruction.
2622 */
2623 static int
2624 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2625 {
2626 const struct gen_device_info *devinfo = p->devinfo;
2627 int offset;
2628 void *store = p->store;
2629
2630 assert(devinfo->gen >= 6);
2631
2632 /* Always start after the instruction (such as a WHILE) we're trying to fix
2633 * up.
2634 */
2635 for (offset = next_offset(devinfo, store, start_offset);
2636 offset < p->next_insn_offset;
2637 offset = next_offset(devinfo, store, offset)) {
2638 brw_inst *insn = store + offset;
2639
2640 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2641 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2642 return offset;
2643 }
2644 }
2645 assert(!"not reached");
2646 return start_offset;
2647 }
2648
2649 /* After program generation, go back and update the UIP and JIP of BREAK,
2650  * CONT, and HALT instructions (and the JIP of ENDIF) to their correct locations.
2651  */
2652 void
2653 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2654 {
2655 const struct gen_device_info *devinfo = p->devinfo;
2656 int offset;
2657 int br = brw_jump_scale(devinfo);
2658 int scale = 16 / br;
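   /* Worked example: on Gen7 br == 2 (jumps are counted in 8-byte units),
    * so scale == 8; an ENDIF whose block end lies three instructions
    * (48 bytes) ahead gets a JIP of 48 / 8 == 6.
    */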
2659 void *store = p->store;
2660
2661 if (devinfo->gen < 6)
2662 return;
2663
2664 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2665 brw_inst *insn = store + offset;
2666 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2667
2668 int block_end_offset = brw_find_next_block_end(p, offset);
2669 switch (brw_inst_opcode(devinfo, insn)) {
2670 case BRW_OPCODE_BREAK:
2671 assert(block_end_offset != 0);
2672 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2673 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2674 brw_inst_set_uip(devinfo, insn,
2675 (brw_find_loop_end(p, offset) - offset +
2676 (devinfo->gen == 6 ? 16 : 0)) / scale);
2677 break;
2678 case BRW_OPCODE_CONTINUE:
2679 assert(block_end_offset != 0);
2680 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2681 brw_inst_set_uip(devinfo, insn,
2682 (brw_find_loop_end(p, offset) - offset) / scale);
2683
2684 assert(brw_inst_uip(devinfo, insn) != 0);
2685 assert(brw_inst_jip(devinfo, insn) != 0);
2686 break;
2687
2688 case BRW_OPCODE_ENDIF: {
2689 int32_t jump = (block_end_offset == 0) ?
2690 1 * br : (block_end_offset - offset) / scale;
2691 if (devinfo->gen >= 7)
2692 brw_inst_set_jip(devinfo, insn, jump);
2693 else
2694 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2695 break;
2696 }
2697
2698 case BRW_OPCODE_HALT:
2699 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2700 *
2701 * "In case of the halt instruction not inside any conditional
2702 * code block, the value of <JIP> and <UIP> should be the
2703 * same. In case of the halt instruction inside conditional code
2704 * block, the <UIP> should be the end of the program, and the
2705 * <JIP> should be end of the most inner conditional code block."
2706 *
2707 * The uip will have already been set by whoever set up the
2708 * instruction.
2709 */
2710 if (block_end_offset == 0) {
2711 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2712 } else {
2713 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2714 }
2715 assert(brw_inst_uip(devinfo, insn) != 0);
2716 assert(brw_inst_jip(devinfo, insn) != 0);
2717 break;
2718 }
2719 }
2720 }
2721
2722 void brw_ff_sync(struct brw_codegen *p,
2723 struct brw_reg dest,
2724 unsigned msg_reg_nr,
2725 struct brw_reg src0,
2726 bool allocate,
2727 unsigned response_length,
2728 bool eot)
2729 {
2730 const struct gen_device_info *devinfo = p->devinfo;
2731 brw_inst *insn;
2732
2733 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2734
2735 insn = next_insn(p, BRW_OPCODE_SEND);
2736 brw_set_dest(p, insn, dest);
2737 brw_set_src0(p, insn, src0);
2738 brw_set_src1(p, insn, brw_imm_d(0));
2739
2740 if (devinfo->gen < 6)
2741 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2742
2743 brw_set_ff_sync_message(p,
2744 insn,
2745 allocate,
2746 response_length,
2747 eot);
2748 }
2749
2750 /**
2751 * Emit the SEND instruction necessary to generate stream output data on Gen6
2752 * (for transform feedback).
2753 *
2754 * If send_commit_msg is true, this is the last piece of stream output data
2755 * from this thread, so send the data as a committed write. According to the
2756 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2757 *
2758 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2759 * writes are complete by sending the final write as a committed write."
2760 */
2761 void
2762 brw_svb_write(struct brw_codegen *p,
2763 struct brw_reg dest,
2764 unsigned msg_reg_nr,
2765 struct brw_reg src0,
2766 unsigned binding_table_index,
2767 bool send_commit_msg)
2768 {
2769 const struct gen_device_info *devinfo = p->devinfo;
2770 const unsigned target_cache =
2771 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2772 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2773 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2774 brw_inst *insn;
2775
2776 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2777
2778 insn = next_insn(p, BRW_OPCODE_SEND);
2779 brw_set_dest(p, insn, dest);
2780 brw_set_src0(p, insn, src0);
2781 brw_set_src1(p, insn, brw_imm_d(0));
2782 brw_set_dp_write_message(p, insn,
2783 binding_table_index,
2784 0, /* msg_control: ignored */
2785 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2786 target_cache,
2787 1, /* msg_length */
2788 true, /* header_present */
2789 0, /* last_render_target: ignored */
2790 send_commit_msg, /* response_length */
2791 0, /* end_of_thread */
2792 send_commit_msg); /* send_commit_msg */
2793 }
2794
2795 static unsigned
2796 brw_surface_payload_size(struct brw_codegen *p,
2797 unsigned num_channels,
2798 bool has_simd4x2,
2799 bool has_simd16)
2800 {
2801 if (has_simd4x2 && brw_get_default_access_mode(p) == BRW_ALIGN_16)
2802 return 1;
2803 else if (has_simd16 && brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2804 return 2 * num_channels;
2805 else
2806 return num_channels;
2807 }
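
/* For example, a SIMD16 Align1 untyped read of four channels occupies
 * 2 * 4 == 8 GRFs of response, while the same read in SIMD4x2 Align16 mode
 * packs its result into a single GRF.
 */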
2808
2809 static void
2810 brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
2811 brw_inst *insn,
2812 unsigned atomic_op,
2813 bool response_expected)
2814 {
2815 const struct gen_device_info *devinfo = p->devinfo;
2816 unsigned msg_control =
2817 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2818 (response_expected ? 1 << 5 : 0); /* Return data expected */
2819
2820 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2821 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2822 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2823 msg_control |= 1 << 4; /* SIMD8 mode */
2824
2825 brw_inst_set_dp_msg_type(devinfo, insn,
2826 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2827 } else {
2828 brw_inst_set_dp_msg_type(devinfo, insn,
2829 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2830 }
2831 } else {
2832 brw_inst_set_dp_msg_type(devinfo, insn,
2833 GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2834
2835 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2836 msg_control |= 1 << 4; /* SIMD8 mode */
2837 }
2838
2839 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2840 }
2841
2842 void
2843 brw_untyped_atomic(struct brw_codegen *p,
2844 struct brw_reg dst,
2845 struct brw_reg payload,
2846 struct brw_reg surface,
2847 unsigned atomic_op,
2848 unsigned msg_length,
2849 bool response_expected,
2850 bool header_present)
2851 {
2852 const struct gen_device_info *devinfo = p->devinfo;
2853 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2854 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2855 GEN7_SFID_DATAPORT_DATA_CACHE);
2856 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2857 /* Mask out unused components -- This is especially important in Align16
2858 * mode on generations that don't have native support for SIMD4x2 atomics,
2859 * because unused but enabled components will cause the dataport to perform
2860 * additional atomic operations on the addresses that happen to be in the
2861 * uninitialized Y, Z and W coordinates of the payload.
2862 */
2863 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2864 struct brw_inst *insn = brw_send_indirect_surface_message(
2865 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
2866 brw_surface_payload_size(p, response_expected,
2867 devinfo->gen >= 8 || devinfo->is_haswell, true),
2868 header_present);
2869
2870 brw_set_dp_untyped_atomic_message(
2871 p, insn, atomic_op, response_expected);
2872 }
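
/* Usage sketch (illustrative only): an untyped atomic add that returns the
 * previous value, with a hypothetical two-register payload of address and
 * source operand and an immediate binding-table index:
 *
 *    brw_untyped_atomic(p, dst, payload, brw_imm_ud(surf_index),
 *                       BRW_AOP_ADD, 2, true, false);
 */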
2873
2874 static void
2875 brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
2876 struct brw_inst *insn,
2877 unsigned num_channels)
2878 {
2879 const struct gen_device_info *devinfo = p->devinfo;
2880    /* Set mask of 32-bit channels to drop; e.g. num_channels == 2 gives 0xc, dropping Z and W. */
2881    unsigned msg_control = 0xf & (0xf << num_channels);
2882
2883 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2884 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2885 msg_control |= 1 << 4; /* SIMD16 mode */
2886 else
2887 msg_control |= 2 << 4; /* SIMD8 mode */
2888 }
2889
2890 brw_inst_set_dp_msg_type(devinfo, insn,
2891 (devinfo->gen >= 8 || devinfo->is_haswell ?
2892 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2893 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
2894 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2895 }
2896
2897 void
2898 brw_untyped_surface_read(struct brw_codegen *p,
2899 struct brw_reg dst,
2900 struct brw_reg payload,
2901 struct brw_reg surface,
2902 unsigned msg_length,
2903 unsigned num_channels)
2904 {
2905 const struct gen_device_info *devinfo = p->devinfo;
2906 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2907 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2908 GEN7_SFID_DATAPORT_DATA_CACHE);
2909 struct brw_inst *insn = brw_send_indirect_surface_message(
2910 p, sfid, dst, payload, surface, msg_length,
2911 brw_surface_payload_size(p, num_channels, true, true),
2912 false);
2913
2914 brw_set_dp_untyped_surface_read_message(
2915 p, insn, num_channels);
2916 }
2917
2918 static void
2919 brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
2920 struct brw_inst *insn,
2921 unsigned num_channels)
2922 {
2923 const struct gen_device_info *devinfo = p->devinfo;
2924 /* Set mask of 32-bit channels to drop. */
2925 unsigned msg_control = 0xf & (0xf << num_channels);
2926
2927 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2928 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2929 msg_control |= 1 << 4; /* SIMD16 mode */
2930 else
2931 msg_control |= 2 << 4; /* SIMD8 mode */
2932 } else {
2933 if (devinfo->gen >= 8 || devinfo->is_haswell)
2934 msg_control |= 0 << 4; /* SIMD4x2 mode */
2935 else
2936 msg_control |= 2 << 4; /* SIMD8 mode */
2937 }
2938
2939 brw_inst_set_dp_msg_type(devinfo, insn,
2940 devinfo->gen >= 8 || devinfo->is_haswell ?
2941 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
2942 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
2943 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2944 }
2945
2946 void
2947 brw_untyped_surface_write(struct brw_codegen *p,
2948 struct brw_reg payload,
2949 struct brw_reg surface,
2950 unsigned msg_length,
2951 unsigned num_channels,
2952 bool header_present)
2953 {
2954 const struct gen_device_info *devinfo = p->devinfo;
2955 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2956 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2957 GEN7_SFID_DATAPORT_DATA_CACHE);
2958 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2959 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
2960 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
2961 WRITEMASK_X : WRITEMASK_XYZW;
2962 struct brw_inst *insn = brw_send_indirect_surface_message(
2963 p, sfid, brw_writemask(brw_null_reg(), mask),
2964 payload, surface, msg_length, 0, header_present);
2965
2966 brw_set_dp_untyped_surface_write_message(
2967 p, insn, num_channels);
2968 }
2969
2970 static unsigned
2971 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
2972 {
2973 switch (bit_size) {
2974 case 8:
2975 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
2976 case 16:
2977 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
2978 case 32:
2979 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
2980 default:
2981 unreachable("Unsupported bit_size for byte scattered messages");
2982 }
2983 }
2984
2985
2986 void
2987 brw_byte_scattered_read(struct brw_codegen *p,
2988 struct brw_reg dst,
2989 struct brw_reg payload,
2990 struct brw_reg surface,
2991 unsigned msg_length,
2992 unsigned bit_size)
2993 {
2994 const struct gen_device_info *devinfo = p->devinfo;
2995 assert(devinfo->gen > 7 || devinfo->is_haswell);
2996 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
2997 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
2998
2999 struct brw_inst *insn = brw_send_indirect_surface_message(
3000 p, sfid, dst, payload, surface, msg_length,
3001 brw_surface_payload_size(p, 1, true, true),
3002 false);
3003
3004 unsigned msg_control =
3005 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3006
3007 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
3008 msg_control |= 1; /* SIMD16 mode */
3009 else
3010 msg_control |= 0; /* SIMD8 mode */
3011
3012 brw_inst_set_dp_msg_type(devinfo, insn,
3013 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
3014 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3015 }
3016
3017 void
3018 brw_byte_scattered_write(struct brw_codegen *p,
3019 struct brw_reg payload,
3020 struct brw_reg surface,
3021 unsigned msg_length,
3022 unsigned bit_size,
3023 bool header_present)
3024 {
3025 const struct gen_device_info *devinfo = p->devinfo;
3026 assert(devinfo->gen > 7 || devinfo->is_haswell);
3027 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3028 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
3029
3030 struct brw_inst *insn = brw_send_indirect_surface_message(
3031 p, sfid, brw_writemask(brw_null_reg(), WRITEMASK_XYZW),
3032 payload, surface, msg_length, 0, header_present);
3033
3034 unsigned msg_control =
3035 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3036
3037    if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
3038       msg_control |= 1; /* SIMD16 mode */
3039    else
3040       msg_control |= 0; /* SIMD8 mode */
3041
3042 brw_inst_set_dp_msg_type(devinfo, insn,
3043 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
3044 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3045 }
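
/* Usage sketch (illustrative only): a 16-bit byte-scattered write, e.g. for
 * packed half-float data, with a hypothetical two-register payload of byte
 * addresses followed by data:
 *
 *    brw_byte_scattered_write(p, payload, brw_imm_ud(surf_index),
 *                             2, 16, false);
 */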
3046
3047 static void
3048 brw_set_dp_typed_atomic_message(struct brw_codegen *p,
3049 struct brw_inst *insn,
3050 unsigned atomic_op,
3051 bool response_expected)
3052 {
3053 const struct gen_device_info *devinfo = p->devinfo;
3054 unsigned msg_control =
3055 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3056 (response_expected ? 1 << 5 : 0); /* Return data expected */
3057
3058 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3059 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3060 if ((brw_get_default_group(p) / 8) % 2 == 1)
3061 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3062
3063 brw_inst_set_dp_msg_type(devinfo, insn,
3064 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
3065 } else {
3066 brw_inst_set_dp_msg_type(devinfo, insn,
3067 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
3068 }
3069
3070 } else {
3071 brw_inst_set_dp_msg_type(devinfo, insn,
3072 GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);
3073
3074 if ((brw_get_default_group(p) / 8) % 2 == 1)
3075 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3076 }
3077
3078 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3079 }
3080
3081 void
3082 brw_typed_atomic(struct brw_codegen *p,
3083 struct brw_reg dst,
3084 struct brw_reg payload,
3085 struct brw_reg surface,
3086 unsigned atomic_op,
3087 unsigned msg_length,
3088 bool response_expected,
3089 bool header_present) {
3090 const struct gen_device_info *devinfo = p->devinfo;
3091 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3092 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3093 GEN6_SFID_DATAPORT_RENDER_CACHE);
3094 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3095 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3096 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3097 struct brw_inst *insn = brw_send_indirect_surface_message(
3098 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
3099 brw_surface_payload_size(p, response_expected,
3100 devinfo->gen >= 8 || devinfo->is_haswell, false),
3101 header_present);
3102
3103 brw_set_dp_typed_atomic_message(
3104 p, insn, atomic_op, response_expected);
3105 }
3106
3107 static void
3108 brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
3109 struct brw_inst *insn,
3110 unsigned num_channels)
3111 {
3112 const struct gen_device_info *devinfo = p->devinfo;
3113 /* Set mask of unused channels. */
3114 unsigned msg_control = 0xf & (0xf << num_channels);
3115
3116 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3117 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3118 if ((brw_get_default_group(p) / 8) % 2 == 1)
3119 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3120 else
3121 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3122 }
3123
3124 brw_inst_set_dp_msg_type(devinfo, insn,
3125 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
3126 } else {
3127 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3128 if ((brw_get_default_group(p) / 8) % 2 == 1)
3129 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3130 }
3131
3132 brw_inst_set_dp_msg_type(devinfo, insn,
3133 GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
3134 }
3135
3136 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3137 }
3138
3139 void
3140 brw_typed_surface_read(struct brw_codegen *p,
3141 struct brw_reg dst,
3142 struct brw_reg payload,
3143 struct brw_reg surface,
3144 unsigned msg_length,
3145 unsigned num_channels,
3146 bool header_present)
3147 {
3148 const struct gen_device_info *devinfo = p->devinfo;
3149 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3150 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3151 GEN6_SFID_DATAPORT_RENDER_CACHE);
3152 struct brw_inst *insn = brw_send_indirect_surface_message(
3153 p, sfid, dst, payload, surface, msg_length,
3154 brw_surface_payload_size(p, num_channels,
3155 devinfo->gen >= 8 || devinfo->is_haswell, false),
3156 header_present);
3157
3158 brw_set_dp_typed_surface_read_message(
3159 p, insn, num_channels);
3160 }
3161
3162 static void
3163 brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
3164 struct brw_inst *insn,
3165 unsigned num_channels)
3166 {
3167 const struct gen_device_info *devinfo = p->devinfo;
3168 /* Set mask of unused channels. */
3169 unsigned msg_control = 0xf & (0xf << num_channels);
3170
3171 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3172 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3173 if ((brw_get_default_group(p) / 8) % 2 == 1)
3174 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3175 else
3176 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3177 }
3178
3179 brw_inst_set_dp_msg_type(devinfo, insn,
3180 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);
3181
3182 } else {
3183 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3184 if ((brw_get_default_group(p) / 8) % 2 == 1)
3185 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3186 }
3187
3188 brw_inst_set_dp_msg_type(devinfo, insn,
3189 GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
3190 }
3191
3192 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3193 }
3194
3195 void
3196 brw_typed_surface_write(struct brw_codegen *p,
3197 struct brw_reg payload,
3198 struct brw_reg surface,
3199 unsigned msg_length,
3200 unsigned num_channels,
3201 bool header_present)
3202 {
3203 const struct gen_device_info *devinfo = p->devinfo;
3204 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3205 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3206 GEN6_SFID_DATAPORT_RENDER_CACHE);
3207 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3208 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3209 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3210 WRITEMASK_X : WRITEMASK_XYZW);
3211 struct brw_inst *insn = brw_send_indirect_surface_message(
3212 p, sfid, brw_writemask(brw_null_reg(), mask),
3213 payload, surface, msg_length, 0, header_present);
3214
3215 brw_set_dp_typed_surface_write_message(
3216 p, insn, num_channels);
3217 }
3218
3219 static void
3220 brw_set_memory_fence_message(struct brw_codegen *p,
3221 struct brw_inst *insn,
3222 enum brw_message_target sfid,
3223 bool commit_enable)
3224 {
3225 const struct gen_device_info *devinfo = p->devinfo;
3226
3227 brw_set_desc(p, insn, brw_message_desc(
3228 devinfo, 1, (commit_enable ? 1 : 0), true));
3229
3230 brw_inst_set_sfid(devinfo, insn, sfid);
3231
3232 switch (sfid) {
3233 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3234 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3235 break;
3236 case GEN7_SFID_DATAPORT_DATA_CACHE:
3237 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3238 break;
3239 default:
3240 unreachable("Not reached");
3241 }
3242
3243 if (commit_enable)
3244 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3245 }
3246
3247 void
3248 brw_memory_fence(struct brw_codegen *p,
3249 struct brw_reg dst,
3250 enum opcode send_op)
3251 {
3252 const struct gen_device_info *devinfo = p->devinfo;
3253 const bool commit_enable =
3254 devinfo->gen >= 10 || /* HSD ES # 1404612949 */
3255 (devinfo->gen == 7 && !devinfo->is_haswell);
3256 struct brw_inst *insn;
3257
3258 brw_push_insn_state(p);
3259 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3260 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3261 dst = vec1(dst);
3262
3263    /* Set dst as the destination for dependency tracking; the MEMORY_FENCE
3264     * message doesn't write anything back.
3265     */
   insn = next_insn(p, send_op);
   dst = retype(dst, BRW_REGISTER_TYPE_UW);
   brw_set_dest(p, insn, dst);
   brw_set_src0(p, insn, dst);
   brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
                                commit_enable);

   if (devinfo->gen == 7 && !devinfo->is_haswell) {
      /* IVB does typed surface access through the render cache, so we need
       * to flush it too.  Use a different register so both flushes can be
       * pipelined by the hardware.
       */
      insn = next_insn(p, send_op);
      brw_set_dest(p, insn, offset(dst, 1));
      brw_set_src0(p, insn, offset(dst, 1));
      brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
                                   commit_enable);

      /* Now write the response of the second message into the response of
       * the first to trigger a pipeline stall -- this way future render and
       * data cache messages will be properly ordered with respect to past
       * data and render cache messages.
       */
      brw_MOV(p, dst, offset(dst, 1));
   }

   brw_pop_insn_state(p);
}
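
/* Usage sketch (illustrative; g10 is an arbitrary scratch GRF, not a name
 * taken from this file): fence all previous data-cache writes, e.g. before
 * a workgroup barrier:
 *
 *    brw_memory_fence(p, brw_vec8_grf(10, 0), BRW_OPCODE_SEND);
 *
 * On IVB this expands to two fences (data cache and render cache) plus the
 * MOV above that stalls until both have committed.
 */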

void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;
   const uint16_t exec_size = brw_get_default_exec_size(p);
   const uint16_t qtr_ctrl = brw_get_default_group(p) / 8;

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually immediate.
    */
   insn = brw_send_indirect_message(p,
                                    GEN7_SFID_PIXEL_INTERPOLATOR,
                                    dest,
                                    mrf,
                                    vec1(data), 0);
   brw_inst_set_mlen(devinfo, insn, msg_length);
   brw_inst_set_rlen(devinfo, insn, response_length);

   brw_inst_set_pi_simd_mode(devinfo, insn, exec_size == BRW_EXECUTE_16);
   brw_inst_set_pi_slot_group(devinfo, insn, qtr_ctrl / 2);
   brw_inst_set_pi_nopersp(devinfo, insn, noperspective);
   brw_inst_set_pi_message_type(devinfo, insn, mode);
}
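
/* Usage sketch (illustrative; the mode constant is one of the PI message
 * types from brw_eu_defines.h, and the registers and lengths are
 * assumptions): a SIMD8 per-sample barycentric query returning two GRFs of
 * interpolated coordinates, with the sample index in `sample_id`:
 *
 *    brw_pixel_interpolator_query(p, dest, payload, false,
 *                                 GEN7_PIXEL_INTERPOLATOR_LOC_SAMPLE,
 *                                 sample_id, 2, 2);
 */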

void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_get_default_exec_size(p);
   const unsigned qtr_control = brw_get_default_group(p) / 8;
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just
          * find the first bit set in the execution mask.  The register
          * exists on HSW already but it reads back as all ones when the
          * current instruction has execution masking disabled, so it's
          * kind of useless.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's not
             * tightly packed (i.e. it doesn't have the form '2^n - 1' for
             * some n).  Combine ce0 with the given dispatch (or vector) mask
             * to mask off those channels which were never dispatched by the
             * hardware.
             */
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }

         /* Quarter control has the effect of magically shifting the value of
          * ce0 so you'll get the first active channel relative to the
          * specified quarter control as result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
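         /* Worked example (illustrative, not from the original source): if
          * exec_mask reads back as 0xfffffffc, the FBL above writes 2 to
          * dst, i.e. channel 2 is the first live channel.
          */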
      } else {
         const struct brw_reg flag = brw_flag_reg(p->current->flag_subreg / 2,
                                                  p->current->flag_subreg % 2);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking and
          * a conditional modifier enabled in order to get the full execution
          * mask in f1.0.  We could use a single 32-wide move here if it
          * weren't for the hardware bug that causes channel enables to be
          * applied incorrectly to the second half of 32-wide instructions on
          * Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }

         /* Find the first bit set in the exec_size-wide portion of the flag
          * register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.  Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination without and with execution masking to
          * find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}
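
/* Usage sketch (illustrative; `tmp` is an assumed scratch GRF and
 * brw_dmask_reg() is the dispatch-mask helper from brw_reg.h):
 *
 *    brw_find_live_channel(p, vec1(tmp), brw_dmask_reg());
 *
 * leaves the index of the first live channel in tmp.0, ready to feed
 * e.g. brw_broadcast() below.
 */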

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset.  The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address.  Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, account for the difference if the source
          * register is above this limit.
          */
         if (offset >= limit) {
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }
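
         /* Worked example (illustrative, not from the original source): in
          * the block above, src.nr == 20 with 32-byte registers gives
          * offset == 640; since 640 >= 512, we add 512 to the address
          * register and keep 640 % 512 == 128 as the addressing immediate
          * used below.
          */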

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation is
             *    integer DWord multiply, indirect addressing must not be
             *    used."
             *
             * To work around this restriction, we do two integer MOVs
             * instead of one 64-bit MOV.  Because no double value should
             * ever cross a register boundary, it's safe to use the immediate
             * offset in the indirect here to handle adding 4 bytes to the
             * offset and avoid the extra ADD to the register file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_vec1_indirect(addr.subnr, offset),
                           BRW_REGISTER_TYPE_D));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_vec1_indirect(addr.subnr, offset + 4),
                           BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can be either zero or one; replicate it
          * to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}
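
/* Usage sketch (illustrative, following on from the brw_find_live_channel()
 * sketch above): pull the selected channel of a potentially non-uniform
 * source into all channels of dst, with the channel index sitting in the
 * scratch register `tmp`:
 *
 *    brw_broadcast(p, dst, src, vec1(tmp));
 *
 * src must be a direct GRF of the same type as dst, per the asserts at the
 * top of the function.
 */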

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we just want to
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_src1(p, send, brw_imm_ud(0));
   brw_set_desc(p, send, brw_message_desc(devinfo, 2, 0, false));
   brw_inst_set_sfid(devinfo, send, sfid);
   brw_inst_set_binding_table_index(devinfo, send, surf_index);
   brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);

   brw_pop_insn_state(p);
}
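
/* Usage sketch (illustrative; the payload register and binding-table slot
 * are assumptions): with the shader-time payload assembled in g5 and the
 * shader-time buffer bound at binding-table entry 0:
 *
 *    brw_shader_time_add(p, brw_vec8_grf(5, 0), 0);
 */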


/**
 * Emit the SEND message for a barrier.
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());
   brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier.
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}

/**
 * Changes the floating-point rounding mode by updating the control register
 * field defined at cr0.0[5-6] bits.  This function supports changing to
 * RTNE (00), RU (01), RD (10) and RTZ (11) rounding using bitwise
 * operations.  Only RTNE and RTZ rounding are enabled at the NIR level.
 */
void
brw_rounding_mode(struct brw_codegen *p,
                  enum brw_rnd_mode mode)
{
   const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;

   if (bits != BRW_CR0_RND_MODE_MASK) {
      brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                               brw_imm_ud(~BRW_CR0_RND_MODE_MASK));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);

      /* From the Skylake PRM, Volume 7, page 760:
       *
       *    "Implementation Restriction on Register Access: When the control
       *    register is used as an explicit source and/or destination,
       *    hardware does not ensure execution pipeline coherency.  Software
       *    must set the thread control field to ‘switch’ for an instruction
       *    that uses control register as an explicit operand."
       */
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }

   if (bits) {
      brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                              brw_imm_ud(bits));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }
}
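
/* Usage sketch (illustrative): temporarily switch to round-to-zero around a
 * conversion and restore round-to-nearest-even afterwards:
 *
 *    brw_rounding_mode(p, BRW_RND_MODE_RTZ);
 *    ... emit the conversion ...
 *    brw_rounding_mode(p, BRW_RND_MODE_RTNE);
 *
 * For RTNE (bits == 0) the OR above is skipped; for RTZ (bits equal to the
 * full mask) the AND is skipped, so each switch costs at most two
 * instructions.
 */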