/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
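
/* Illustrative usage sketch (hypothetical registers, not part of the
 * original file): before emitting a SEND whose payload currently lives in a
 * GRF, resolve the implied move so the source refers to the chosen MRF.
 *
 *    struct brw_reg payload = brw_vec8_grf(2, 0);  // hypothetical payload
 *    gen6_resolve_implied_move(p, &payload, 1);    // payload now refers to m1
 *    // ... emit the SEND with 'payload' as src0 ...
 */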

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
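
/* Sketch of the resulting mapping (assuming GEN7_MRF_HACK_START is 112, as
 * the EOT quote above suggests): m0 -> g112, m1 -> g113, ..., m15 -> g127,
 * i.e. exactly the R112-R127 range required for messages with EOT.
 * (Explanatory note, not part of the original file.)
 */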

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          *    Although Dst.HorzStride is a don't care for Align16, HW needs
          *    this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Even though hstride is ignored in DA16 mode, it still needs to be
          * set to '01'.
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * On platforms that support fp64 we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16.  In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}
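
/* Illustrative sketch of the automatic narrowing above (hypothetical
 * registers, not from the original file):
 *
 *    brw_MOV(p, brw_vec1_grf(10, 0), brw_imm_ud(0));
 *
 * With p->automatic_exec_sizes set on gen6+, brw_set_dest() sees a
 * single-channel destination (width 1 < 4) and narrows the MOV's exec size
 * from the default 8/16 down to 1 to match the destination region.
 */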

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Specify the descriptor and extended descriptor immediate for a SEND(C)
 * message instruction.
 */
void
brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
                unsigned desc, unsigned ex_desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst_set_src1_file_type(devinfo, inst,
                               BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_D);
   brw_inst_set_send_desc(devinfo, inst, desc);
   if (devinfo->gen >= 9 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC))
      brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
}
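
/* Illustrative usage sketch (hypothetical values, not part of the original
 * file), using the brw_message_desc() helper seen elsewhere in this file:
 *
 *    brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
 *    // ... set destination and payload source ...
 *    brw_set_desc_ex(p, send,
 *                    brw_message_desc(devinfo, 4, 1, true), // mlen 4, rlen 1, header
 *                    0);                                    // no extended descriptor
 */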

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}
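
/* For example: POW and the integer-divide functions take two source
 * operands, hence msg_length = 2, while SINCOS and
 * INT_DIV_QUOTIENT_AND_REMAINDER return two result registers, hence
 * response_length = 2.  (Explanatory note, not part of the original file.)
 */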


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

void
brw_set_dp_write_message(struct brw_codegen *p,
                         brw_inst *insn,
                         unsigned binding_table_index,
                         unsigned msg_control,
                         unsigned msg_type,
                         unsigned target_cache,
                         unsigned msg_length,
                         bool header_present,
                         unsigned last_render_target,
                         unsigned response_length,
                         unsigned end_of_thread,
                         unsigned send_commit_msg)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_WRITE);

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, header_present));

   brw_inst_set_sfid(devinfo, insn, sfid);
   brw_inst_set_eot(devinfo, insn, !!end_of_thread);
   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_write_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_write_msg_control(devinfo, insn, msg_control);
   brw_inst_set_rt_last(devinfo, insn, last_render_target);
   if (devinfo->gen < 7) {
      brw_inst_set_dp_write_commit(devinfo, insn, send_commit_msg);
   }

   if (devinfo->gen >= 11)
      brw_inst_set_null_rt(devinfo, insn, false);
}

void
brw_set_dp_read_message(struct brw_codegen *p,
                        brw_inst *insn,
                        unsigned binding_table_index,
                        unsigned msg_control,
                        unsigned msg_type,
                        unsigned target_cache,
                        unsigned msg_length,
                        bool header_present,
                        unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_READ);

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, header_present));

   const unsigned opcode = brw_inst_opcode(devinfo, insn);
   if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC)
      brw_inst_set_sfid(devinfo, insn, sfid);
   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_read_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_read_msg_control(devinfo, insn, msg_control);
   if (devinfo->gen < 6)
      brw_inst_set_dp_read_target_cache(devinfo, insn, target_cache);
}

void
brw_set_sampler_message(struct brw_codegen *p,
                        brw_inst *inst,
                        unsigned binding_table_index,
                        unsigned sampler,
                        unsigned msg_type,
                        unsigned response_length,
                        unsigned msg_length,
                        unsigned header_present,
                        unsigned simd_mode,
                        unsigned return_format)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, header_present));

   const unsigned opcode = brw_inst_opcode(devinfo, inst);
   if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC)
      brw_inst_set_sfid(devinfo, inst, BRW_SFID_SAMPLER);
   brw_inst_set_binding_table_index(devinfo, inst, binding_table_index);
   brw_inst_set_sampler(devinfo, inst, sampler);
   brw_inst_set_sampler_msg_type(devinfo, inst, msg_type);
   if (devinfo->gen >= 5) {
      brw_inst_set_sampler_simd_mode(devinfo, inst, simd_mode);
   } else if (devinfo->gen == 4 && !devinfo->is_g4x) {
      brw_inst_set_sampler_return_format(devinfo, inst, return_format);
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, mlen, rlen, header_present));

   brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
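
/* Worked example of the block-size encoding above (explanatory note, not in
 * the original): on gen8+ the field holds log2(num_regs), so 1/2/4/8
 * registers encode as 0/1/2/3; before gen8 it holds num_regs - 1, so 1/2/4
 * encode as 0/1/3.
 */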

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
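
/* For example, a byte subnr of 12 maps to component 3 (12 / 4).
 * (Explanatory note, not part of the original file.)
 */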

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.nr < 128);
   assert(src1.nr < 128);
   assert(src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD);
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types.  The MAD and LRP emitters
          * ensure that all four types are float.  The BFE and BFI2 emitters,
          * however, may send us mixed D and UD types and want us to ignore
          * that and use the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                                  \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0)                           \
{                                                                 \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);               \
}

#define ALU2(OP)                                                  \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1)                           \
{                                                                 \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);         \
}

#define ALU3(OP)                                                  \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

#define ALU3F(OP)                                                 \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                     \
          dest.type == BRW_REGISTER_TYPE_DF);                     \
   if (dest.type == BRW_REGISTER_TYPE_F) {                        \
      assert(src0.type == BRW_REGISTER_TYPE_F);                   \
      assert(src1.type == BRW_REGISTER_TYPE_F);                   \
      assert(src2.type == BRW_REGISTER_TYPE_F);                   \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {                \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                  \
   }                                                              \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}
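
/* For reference (explanatory sketch, not in the original file): ALU2(AND)
 * below expands to
 *
 *    brw_inst *brw_AND(struct brw_codegen *p, struct brw_reg dest,
 *                      struct brw_reg src0, struct brw_reg src1)
 *    {
 *       return brw_alu2(p, BRW_OPCODE_AND, dest, src0, src1);
 *    }
 */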

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                       \
void brw_##OP(struct brw_codegen *p,                                    \
              struct brw_reg dest,                                      \
              struct brw_reg src)                                       \
{                                                                       \
   const struct gen_device_info *devinfo = p->devinfo;                  \
   brw_inst *rnd, *add;                                                 \
   rnd = next_insn(p, BRW_OPCODE_##OP);                                 \
   brw_set_dest(p, rnd, dest);                                          \
   brw_set_src0(p, rnd, src);                                           \
                                                                        \
   if (devinfo->gen < 6) {                                              \
      /* turn on round-increments */                                    \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R);      \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));                    \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL);    \
   }                                                                    \
}


ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   Because this instruction does not have a 16-bit floating-point
       *   type, the source data type must be Word (W). The destination type
       *   must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}



/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, e.g. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
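
/* Illustrative usage sketch (hypothetical registers, not part of the
 * original file):
 *
 *    brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_NZ, x, brw_imm_f(0.0f));
 *    brw_IF(p, BRW_EXECUTE_8);
 *    // ... instructions for channels where x != 0 ...
 *    brw_ELSE(p);
 *    // ... instructions for the remaining channels ...
 *    brw_ENDIF(p);   // patches the IF/ELSE jump targets emitted above
 */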
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP.  On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /*
    * A single call to next_insn() may change the base address of the
    * instruction store memory (p->store), so call it first, before
    * referencing the instruction store pointer via an index.
    */
1577 if (emit_endif)
1578 insn = next_insn(p, BRW_OPCODE_ENDIF);
1579
1580 /* Pop the IF and (optional) ELSE instructions from the stack */
1581 p->if_depth_in_loop[p->loop_stack_depth]--;
1582 tmp = pop_if_stack(p);
1583 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1584 else_inst = tmp;
1585 tmp = pop_if_stack(p);
1586 }
1587 if_inst = tmp;
1588
1589 if (!emit_endif) {
1590 /* ENDIF is useless; don't bother emitting it. */
1591 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1592 return;
1593 }
1594
1595 if (devinfo->gen < 6) {
1596 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1597 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1598 brw_set_src1(p, insn, brw_imm_d(0x0));
1599 } else if (devinfo->gen == 6) {
1600 brw_set_dest(p, insn, brw_imm_w(0));
1601 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1602 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1603 } else if (devinfo->gen == 7) {
1604 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1605 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1606 brw_set_src1(p, insn, brw_imm_w(0));
1607 } else {
1608 brw_set_src0(p, insn, brw_imm_d(0));
1609 }
1610
1611 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1612 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1613 if (devinfo->gen < 6)
1614 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1615
1616    /* Also pop an item off the stack in the ENDIF instruction: */
1617 if (devinfo->gen < 6) {
1618 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1619 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1620 } else if (devinfo->gen == 6) {
1621 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1622 } else {
1623 brw_inst_set_jip(devinfo, insn, 2);
1624 }
1625 patch_IF_ELSE(p, if_inst, else_inst, insn);
1626 }
1627
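/* Typical structured control flow emission (an illustrative sketch, not part
 * of the original source; the CMP that sets the flag beforehand is assumed):
 *
 *    brw_IF(p, BRW_EXECUTE_8);
 *       ...emit the "then" block...
 *    brw_ELSE(p);
 *       ...emit the "else" block...
 *    brw_ENDIF(p);
 *
 * The IF/ELSE jump fields are left as zero until brw_ENDIF() calls
 * patch_IF_ELSE() above with the locations of all three instructions.
 */
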
1628 brw_inst *
1629 brw_BREAK(struct brw_codegen *p)
1630 {
1631 const struct gen_device_info *devinfo = p->devinfo;
1632 brw_inst *insn;
1633
1634 insn = next_insn(p, BRW_OPCODE_BREAK);
1635 if (devinfo->gen >= 8) {
1636 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1637 brw_set_src0(p, insn, brw_imm_d(0x0));
1638 } else if (devinfo->gen >= 6) {
1639 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1640 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1641 brw_set_src1(p, insn, brw_imm_d(0x0));
1642 } else {
1643 brw_set_dest(p, insn, brw_ip_reg());
1644 brw_set_src0(p, insn, brw_ip_reg());
1645 brw_set_src1(p, insn, brw_imm_d(0x0));
1646 brw_inst_set_gen4_pop_count(devinfo, insn,
1647 p->if_depth_in_loop[p->loop_stack_depth]);
1648 }
1649 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1650 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1651
1652 return insn;
1653 }
1654
1655 brw_inst *
1656 brw_CONT(struct brw_codegen *p)
1657 {
1658 const struct gen_device_info *devinfo = p->devinfo;
1659 brw_inst *insn;
1660
1661 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1662 brw_set_dest(p, insn, brw_ip_reg());
1663 if (devinfo->gen >= 8) {
1664 brw_set_src0(p, insn, brw_imm_d(0x0));
1665 } else {
1666 brw_set_src0(p, insn, brw_ip_reg());
1667 brw_set_src1(p, insn, brw_imm_d(0x0));
1668 }
1669
1670 if (devinfo->gen < 6) {
1671 brw_inst_set_gen4_pop_count(devinfo, insn,
1672 p->if_depth_in_loop[p->loop_stack_depth]);
1673 }
1674 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1675 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1676 return insn;
1677 }
1678
1679 brw_inst *
1680 gen6_HALT(struct brw_codegen *p)
1681 {
1682 const struct gen_device_info *devinfo = p->devinfo;
1683 brw_inst *insn;
1684
1685 insn = next_insn(p, BRW_OPCODE_HALT);
1686 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1687 if (devinfo->gen >= 8) {
1688 brw_set_src0(p, insn, brw_imm_d(0x0));
1689 } else {
1690 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1691 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1692 }
1693
1694 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1695 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1696 return insn;
1697 }
1698
1699 /* DO/WHILE loop:
1700 *
1701 * The DO/WHILE is just an unterminated loop -- break or continue are
1702  * used for control within the loop.  There are a few ways this can be
1703  * done.
1704 *
1705 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1706 * jip and no DO instruction.
1707 *
1708 * For non-uniform control flow pre-gen6, there's a DO instruction to
1709 * push the mask, and a WHILE to jump back, and BREAK to get out and
1710 * pop the mask.
1711 *
1712 * For gen6, there's no more mask stack, so no need for DO. WHILE
1713 * just points back to the first instruction of the loop.
1714 */
1715 brw_inst *
1716 brw_DO(struct brw_codegen *p, unsigned execute_size)
1717 {
1718 const struct gen_device_info *devinfo = p->devinfo;
1719
1720 if (devinfo->gen >= 6 || p->single_program_flow) {
1721 push_loop_stack(p, &p->store[p->nr_insn]);
1722 return &p->store[p->nr_insn];
1723 } else {
1724 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1725
1726 push_loop_stack(p, insn);
1727
1728 /* Override the defaults for this instruction:
1729 */
1730 brw_set_dest(p, insn, brw_null_reg());
1731 brw_set_src0(p, insn, brw_null_reg());
1732 brw_set_src1(p, insn, brw_null_reg());
1733
1734 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1735 brw_inst_set_exec_size(devinfo, insn, execute_size);
1736 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1737
1738 return insn;
1739 }
1740 }
1741
1742 /**
1743 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1744 * instruction here.
1745 *
1746 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1747 * nesting, since it can always just point to the end of the block/current loop.
1748 */
1749 static void
1750 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1751 {
1752 const struct gen_device_info *devinfo = p->devinfo;
1753 brw_inst *do_inst = get_inner_do_insn(p);
1754 brw_inst *inst;
1755 unsigned br = brw_jump_scale(devinfo);
1756
1757 assert(devinfo->gen < 6);
1758
1759 for (inst = while_inst - 1; inst != do_inst; inst--) {
1760       /* If the jump count is nonzero, this instruction has already been
1761        * patched, because it belongs to a loop nested inside the one we're
1762 * patching.
1763 */
1764 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1765 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1766 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1767 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1768 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1769 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1770 }
1771 }
1772 }
1773
1774 brw_inst *
1775 brw_WHILE(struct brw_codegen *p)
1776 {
1777 const struct gen_device_info *devinfo = p->devinfo;
1778 brw_inst *insn, *do_insn;
1779 unsigned br = brw_jump_scale(devinfo);
1780
1781 if (devinfo->gen >= 6) {
1782 insn = next_insn(p, BRW_OPCODE_WHILE);
1783 do_insn = get_inner_do_insn(p);
1784
1785 if (devinfo->gen >= 8) {
1786 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1787 brw_set_src0(p, insn, brw_imm_d(0));
1788 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1789 } else if (devinfo->gen == 7) {
1790 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1791 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1792 brw_set_src1(p, insn, brw_imm_w(0));
1793 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1794 } else {
1795 brw_set_dest(p, insn, brw_imm_w(0));
1796 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1797 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1798 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1799 }
1800
1801 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1802
1803 } else {
1804 if (p->single_program_flow) {
1805 insn = next_insn(p, BRW_OPCODE_ADD);
1806 do_insn = get_inner_do_insn(p);
1807
1808 brw_set_dest(p, insn, brw_ip_reg());
1809 brw_set_src0(p, insn, brw_ip_reg());
1810 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1811 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1812 } else {
1813 insn = next_insn(p, BRW_OPCODE_WHILE);
1814 do_insn = get_inner_do_insn(p);
1815
1816 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1817
1818 brw_set_dest(p, insn, brw_ip_reg());
1819 brw_set_src0(p, insn, brw_ip_reg());
1820 brw_set_src1(p, insn, brw_imm_d(0));
1821
1822 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1823 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1824 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1825
1826 brw_patch_break_cont(p, insn);
1827 }
1828 }
1829 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1830
1831 p->loop_stack_depth--;
1832
1833 return insn;
1834 }
1835
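/* Loop emission sketch (illustrative, not part of the original source; the
 * predication on BREAK is an assumption):
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *       ...emit the loop body...
 *       brw_BREAK(p);        // normally predicated on an exit condition
 *    brw_WHILE(p);           // jumps back towards the DO
 *
 * Pre-gen6, brw_patch_break_cont() above fills in the BREAK/CONTINUE jump
 * counts; on gen6+ their JIP/UIP are resolved later by brw_set_uip_jip().
 */
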
1836 /* FORWARD JUMPS:
1837 */
1838 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1839 {
1840 const struct gen_device_info *devinfo = p->devinfo;
1841 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1842 unsigned jmpi = 1;
1843
1844 if (devinfo->gen >= 5)
1845 jmpi = 2;
1846
1847 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1848 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1849
1850 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1851 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1852 }
1853
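/* Worked example (assumed values): a JMPI at index 10 in a program with
 * p->nr_insn == 15 on gen5 (jmpi == 2) gets a jump count of
 * 2 * (15 - 10 - 1) == 8, landing just past the last emitted instruction.
 */
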
1854 /* To integrate with the flow-control code above, the comparison
1855  * instruction populates the flag register.  (It might be simpler still
1856  * to use the flag reg for most WM tasks.)
1857 */
1858 void brw_CMP(struct brw_codegen *p,
1859 struct brw_reg dest,
1860 unsigned conditional,
1861 struct brw_reg src0,
1862 struct brw_reg src1)
1863 {
1864 const struct gen_device_info *devinfo = p->devinfo;
1865 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1866
1867 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1868 brw_set_dest(p, insn, dest);
1869 brw_set_src0(p, insn, src0);
1870 brw_set_src1(p, insn, src1);
1871
1872 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1873 * page says:
1874 * "Any CMP instruction with a null destination must use a {switch}."
1875 *
1876 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1877 * mentioned on their work-arounds pages.
1878 */
1879 if (devinfo->gen == 7) {
1880 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1881 dest.nr == BRW_ARF_NULL) {
1882 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1883 }
1884 }
1885 }
1886
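/* Usage sketch (illustrative; the operand registers src0/src1 are
 * assumptions): compare two floats, leaving the result in f0.0 for later
 * predication:
 *
 *    brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_F),
 *            BRW_CONDITIONAL_GE, src0, src1);
 *
 * With a null destination on gen7, the workaround below also sets {switch}
 * on the instruction.
 */
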
1887 /***********************************************************************
1888 * Helpers for the various SEND message types:
1889 */
1890
1891 /** Extended math function, float[8].
1892 */
1893 void gen4_math(struct brw_codegen *p,
1894 struct brw_reg dest,
1895 unsigned function,
1896 unsigned msg_reg_nr,
1897 struct brw_reg src,
1898 unsigned precision )
1899 {
1900 const struct gen_device_info *devinfo = p->devinfo;
1901 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1902 unsigned data_type;
1903 if (has_scalar_region(src)) {
1904 data_type = BRW_MATH_DATA_SCALAR;
1905 } else {
1906 data_type = BRW_MATH_DATA_VECTOR;
1907 }
1908
1909 assert(devinfo->gen < 6);
1910
1911 /* Example code doesn't set predicate_control for send
1912 * instructions.
1913 */
1914 brw_inst_set_pred_control(devinfo, insn, 0);
1915 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1916
1917 brw_set_dest(p, insn, dest);
1918 brw_set_src0(p, insn, src);
1919 brw_set_math_message(p,
1920 insn,
1921 function,
1922 src.type == BRW_REGISTER_TYPE_D,
1923 precision,
1924 data_type);
1925 }
1926
1927 void gen6_math(struct brw_codegen *p,
1928 struct brw_reg dest,
1929 unsigned function,
1930 struct brw_reg src0,
1931 struct brw_reg src1)
1932 {
1933 const struct gen_device_info *devinfo = p->devinfo;
1934 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1935
1936 assert(devinfo->gen >= 6);
1937
1938 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1939 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1940
1941 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1942 if (devinfo->gen == 6) {
1943 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1944 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1945 }
1946
1947 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1948 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1949 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1950 assert(src0.type != BRW_REGISTER_TYPE_F);
1951 assert(src1.type != BRW_REGISTER_TYPE_F);
1952 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1953 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1954 } else {
1955 assert(src0.type == BRW_REGISTER_TYPE_F);
1956 assert(src1.type == BRW_REGISTER_TYPE_F);
1957 }
1958
1959 /* Source modifiers are ignored for extended math instructions on Gen6. */
1960 if (devinfo->gen == 6) {
1961 assert(!src0.negate);
1962 assert(!src0.abs);
1963 assert(!src1.negate);
1964 assert(!src1.abs);
1965 }
1966
1967 brw_inst_set_math_function(devinfo, insn, function);
1968
1969 brw_set_dest(p, insn, dest);
1970 brw_set_src0(p, insn, src0);
1971 brw_set_src1(p, insn, src1);
1972 }
1973
1974 /**
1975 * Return the right surface index to access the thread scratch space using
1976 * stateless dataport messages.
1977 */
1978 unsigned
1979 brw_scratch_surface_idx(const struct brw_codegen *p)
1980 {
1981 /* The scratch space is thread-local so IA coherency is unnecessary. */
1982 if (p->devinfo->gen >= 8)
1983 return GEN8_BTI_STATELESS_NON_COHERENT;
1984 else
1985 return BRW_BTI_STATELESS;
1986 }
1987
1988 /**
1989  * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1990 * using a constant offset per channel.
1991 *
1992 * The offset must be aligned to oword size (16 bytes). Used for
1993 * register spilling.
1994 */
1995 void brw_oword_block_write_scratch(struct brw_codegen *p,
1996 struct brw_reg mrf,
1997 int num_regs,
1998 unsigned offset)
1999 {
2000 const struct gen_device_info *devinfo = p->devinfo;
2001 const unsigned target_cache =
2002 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2003 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2004 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2005 uint32_t msg_type;
2006
2007 if (devinfo->gen >= 6)
2008 offset /= 16;
2009
2010 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2011
2012 const unsigned mlen = 1 + num_regs;
2013
2014 /* Set up the message header. This is g0, with g0.2 filled with
2015     * the offset.  We don't want to leave our offset around in g0, or
2016     * it would corrupt subsequent texture samples, so build the header
2017     * inside the message register instead.
2018 */
2019 {
2020 brw_push_insn_state(p);
2021 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2022 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2023 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2024
2025 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2026
2027 /* set message header global offset field (reg 0, element 2) */
2028 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2029 brw_MOV(p,
2030 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2031 mrf.nr,
2032 2), BRW_REGISTER_TYPE_UD),
2033 brw_imm_ud(offset));
2034
2035 brw_pop_insn_state(p);
2036 }
2037
2038 {
2039 struct brw_reg dest;
2040 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2041 int send_commit_msg;
2042 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
2043 BRW_REGISTER_TYPE_UW);
2044
2045 brw_inst_set_compression(devinfo, insn, false);
2046
2047       if (brw_inst_exec_size(devinfo, insn) >= BRW_EXECUTE_16)
2048 src_header = vec16(src_header);
2049
2050 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2051 if (devinfo->gen < 6)
2052 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2053
2054 /* Until gen6, writes followed by reads from the same location
2055 * are not guaranteed to be ordered unless write_commit is set.
2056 * If set, then a no-op write is issued to the destination
2057 * register to set a dependency, and a read from the destination
2058 * can be used to ensure the ordering.
2059 *
2060 * For gen6, only writes between different threads need ordering
2061 * protection. Our use of DP writes is all about register
2062 * spilling within a thread.
2063 */
2064 if (devinfo->gen >= 6) {
2065 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2066 send_commit_msg = 0;
2067 } else {
2068 dest = src_header;
2069 send_commit_msg = 1;
2070 }
2071
2072 brw_set_dest(p, insn, dest);
2073 if (devinfo->gen >= 6) {
2074 brw_set_src0(p, insn, mrf);
2075 } else {
2076 brw_set_src0(p, insn, brw_null_reg());
2077 }
2078
2079 if (devinfo->gen >= 6)
2080 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2081 else
2082 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2083
2084 brw_set_dp_write_message(p,
2085 insn,
2086 brw_scratch_surface_idx(p),
2087 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2088 msg_type,
2089 target_cache,
2090 mlen,
2091 true, /* header_present */
2092 0, /* not a render target */
2093 send_commit_msg, /* response_length */
2094 0, /* eot */
2095 send_commit_msg);
2096 }
2097 }
2098
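/* Spill sketch (illustrative, not from the original source; the MRF number
 * and spill_offset are assumptions): spill two GRFs and unspill them later
 * from the same oword-aligned offset:
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(1), 2, spill_offset);
 *    ...
 *    brw_oword_block_read_scratch(p, dst, brw_message_reg(1), 2,
 *                                 spill_offset);
 */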
2099
2100 /**
2101 * Read a block of owords (half a GRF each) from the scratch buffer
2102 * using a constant index per channel.
2103 *
2104 * Offset must be aligned to oword size (16 bytes). Used for register
2105 * spilling.
2106 */
2107 void
2108 brw_oword_block_read_scratch(struct brw_codegen *p,
2109 struct brw_reg dest,
2110 struct brw_reg mrf,
2111 int num_regs,
2112 unsigned offset)
2113 {
2114 const struct gen_device_info *devinfo = p->devinfo;
2115
2116 if (devinfo->gen >= 6)
2117 offset /= 16;
2118
2119 if (p->devinfo->gen >= 7) {
2120 /* On gen 7 and above, we no longer have message registers and we can
2121 * send from any register we want. By using the destination register
2122 * for the message, we guarantee that the implied message write won't
2123 * accidentally overwrite anything. This has been a problem because
2124 * the MRF registers and source for the final FB write are both fixed
2125 * and may overlap.
2126 */
2127 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2128 } else {
2129 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2130 }
2131 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2132
2133 const unsigned rlen = num_regs;
2134 const unsigned target_cache =
2135 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2136 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2137 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2138
2139 {
2140 brw_push_insn_state(p);
2141 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2142 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2143 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2144
2145 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2146
2147 /* set message header global offset field (reg 0, element 2) */
2148 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2149 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2150
2151 brw_pop_insn_state(p);
2152 }
2153
2154 {
2155 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2156
2157       assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2158 brw_inst_set_compression(devinfo, insn, false);
2159
2160       brw_set_dest(p, insn, dest); /* dest was retyped to UW above */
2161 if (devinfo->gen >= 6) {
2162 brw_set_src0(p, insn, mrf);
2163 } else {
2164 brw_set_src0(p, insn, brw_null_reg());
2165 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2166 }
2167
2168 brw_set_dp_read_message(p,
2169 insn,
2170 brw_scratch_surface_idx(p),
2171 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2172 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
2173 target_cache,
2174 1, /* msg_length */
2175 true, /* header_present */
2176 rlen);
2177 }
2178 }
2179
2180 void
2181 gen7_block_read_scratch(struct brw_codegen *p,
2182 struct brw_reg dest,
2183 int num_regs,
2184 unsigned offset)
2185 {
2186 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2187 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2188
2189 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2190
2191 /* The HW requires that the header is present; this is to get the g0.5
2192 * scratch offset.
2193 */
2194 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2195
2196    /* According to the docs, offset is "A 12-bit HWord offset into the
2197     * Immediate Memory buffer as specified by binding table 0xFF."  An HWORD
2198 * is 32 bytes, which happens to be the size of a register.
2199 */
2200 offset /= REG_SIZE;
2201 assert(offset < (1 << 12));
2202
2203 gen7_set_dp_scratch_message(p, insn,
2204 false, /* scratch read */
2205 false, /* OWords */
2206 false, /* invalidate after read */
2207 num_regs,
2208 offset,
2209 1, /* mlen: just g0 */
2210 num_regs, /* rlen */
2211 true); /* header present */
2212 }
2213
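/* Worked example: a byte offset of 256 becomes an HWord offset of
 * 256 / REG_SIZE == 8; with 12 bits available, up to 4096 HWords (128KB)
 * of scratch are addressable per thread this way.
 */
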
2214 /**
2215 * Read float[4] vectors from the data port constant cache.
2216 * Location (in buffer) should be a multiple of 16.
2217 * Used for fetching shader constants.
2218 */
2219 void brw_oword_block_read(struct brw_codegen *p,
2220 struct brw_reg dest,
2221 struct brw_reg mrf,
2222 uint32_t offset,
2223 uint32_t bind_table_index)
2224 {
2225 const struct gen_device_info *devinfo = p->devinfo;
2226 const unsigned target_cache =
2227 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2228 BRW_DATAPORT_READ_TARGET_DATA_CACHE);
2229 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2230
2231 /* On newer hardware, offset is in units of owords. */
2232 if (devinfo->gen >= 6)
2233 offset /= 16;
2234
2235 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2236
2237 brw_push_insn_state(p);
2238 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2239 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2240 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2241
2242 brw_push_insn_state(p);
2243 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2244 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2245
2246 /* set message header global offset field (reg 0, element 2) */
2247 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2248 brw_MOV(p,
2249 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2250 mrf.nr,
2251 2), BRW_REGISTER_TYPE_UD),
2252 brw_imm_ud(offset));
2253 brw_pop_insn_state(p);
2254
2255 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2256
2257 /* cast dest to a uword[8] vector */
2258 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2259
2260 brw_set_dest(p, insn, dest);
2261 if (devinfo->gen >= 6) {
2262 brw_set_src0(p, insn, mrf);
2263 } else {
2264 brw_set_src0(p, insn, brw_null_reg());
2265 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2266 }
2267
2268 brw_set_dp_read_message(p, insn, bind_table_index,
2269 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2270 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2271 target_cache,
2272 1, /* msg_length */
2273 true, /* header_present */
2274 DIV_ROUND_UP(exec_size, 8)); /* response_length */
2275
2276 brw_pop_insn_state(p);
2277 }
2278
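/* Usage sketch (illustrative; the binding table slot and offset are
 * assumptions): fetch a block of constants from surface 3 at byte offset
 * 32, which gen6+ converts to an oword offset of 32 / 16 == 2:
 *
 *    brw_oword_block_read(p, dst, brw_message_reg(1), 32, 3);
 */
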
2279 brw_inst *
2280 brw_fb_WRITE(struct brw_codegen *p,
2281 struct brw_reg payload,
2282 struct brw_reg implied_header,
2283 unsigned msg_control,
2284 unsigned binding_table_index,
2285 unsigned msg_length,
2286 unsigned response_length,
2287 bool eot,
2288 bool last_render_target,
2289 bool header_present)
2290 {
2291 const struct gen_device_info *devinfo = p->devinfo;
2292 const unsigned target_cache =
2293 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2294 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2295 brw_inst *insn;
2296 unsigned msg_type;
2297 struct brw_reg dest, src0;
2298
2299 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2300 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2301 else
2302 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2303
2304 if (devinfo->gen >= 6) {
2305 insn = next_insn(p, BRW_OPCODE_SENDC);
2306 } else {
2307 insn = next_insn(p, BRW_OPCODE_SEND);
2308 }
2309 brw_inst_set_compression(devinfo, insn, false);
2310
2311 if (devinfo->gen >= 6) {
2312 /* headerless version, just submit color payload */
2313 src0 = payload;
2314
2315 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2316 } else {
2317 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2318 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2319 src0 = implied_header;
2320
2321 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2322 }
2323
2324 brw_set_dest(p, insn, dest);
2325 brw_set_src0(p, insn, src0);
2326 brw_set_dp_write_message(p,
2327 insn,
2328 binding_table_index,
2329 msg_control,
2330 msg_type,
2331 target_cache,
2332 msg_length,
2333 header_present,
2334 last_render_target,
2335 response_length,
2336 eot,
2337 0 /* send_commit_msg */);
2338
2339 return insn;
2340 }
2341
2342 brw_inst *
2343 gen9_fb_READ(struct brw_codegen *p,
2344 struct brw_reg dst,
2345 struct brw_reg payload,
2346 unsigned binding_table_index,
2347 unsigned msg_length,
2348 unsigned response_length,
2349 bool per_sample)
2350 {
2351 const struct gen_device_info *devinfo = p->devinfo;
2352 assert(devinfo->gen >= 9);
2353 const unsigned msg_subtype =
2354 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2355 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2356
2357 brw_set_dest(p, insn, dst);
2358 brw_set_src0(p, insn, payload);
2359 brw_set_dp_read_message(p, insn, binding_table_index,
2360 per_sample << 5 | msg_subtype,
2361 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2362 GEN6_SFID_DATAPORT_RENDER_CACHE,
2363 msg_length, true /* header_present */,
2364 response_length);
2365 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2366
2367 return insn;
2368 }
2369
2370 /**
2371 * Texture sample instruction.
2372 * Note: the msg_type plus msg_length values determine exactly what kind
2373 * of sampling operation is performed. See volume 4, page 161 of docs.
2374 */
2375 void brw_SAMPLE(struct brw_codegen *p,
2376 struct brw_reg dest,
2377 unsigned msg_reg_nr,
2378 struct brw_reg src0,
2379 unsigned binding_table_index,
2380 unsigned sampler,
2381 unsigned msg_type,
2382 unsigned response_length,
2383 unsigned msg_length,
2384 unsigned header_present,
2385 unsigned simd_mode,
2386 unsigned return_format)
2387 {
2388 const struct gen_device_info *devinfo = p->devinfo;
2389 brw_inst *insn;
2390
2391 if (msg_reg_nr != -1)
2392 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2393
2394 insn = next_insn(p, BRW_OPCODE_SEND);
2395 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2396
2397 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2398 *
2399 * "Instruction compression is not allowed for this instruction (that
2400 * is, send). The hardware behavior is undefined if this instruction is
2401 * set as compressed. However, compress control can be set to "SecHalf"
2402 * to affect the EMask generation."
2403 *
2404 * No similar wording is found in later PRMs, but there are examples
2405 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2406 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2407 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2408 */
2409 brw_inst_set_compression(devinfo, insn, false);
2410
2411 if (devinfo->gen < 6)
2412 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2413
2414 brw_set_dest(p, insn, dest);
2415 brw_set_src0(p, insn, src0);
2416 brw_set_sampler_message(p, insn,
2417 binding_table_index,
2418 sampler,
2419 msg_type,
2420 response_length,
2421 msg_length,
2422 header_present,
2423 simd_mode,
2424 return_format);
2425 }
2426
2427 /* Adjust the message header's sampler state pointer to
2428 * select the correct group of 16 samplers.
2429 */
2430 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2431 struct brw_reg header,
2432 struct brw_reg sampler_index)
2433 {
2434 /* The "Sampler Index" field can only store values between 0 and 15.
2435 * However, we can add an offset to the "Sampler State Pointer"
2436 * field, effectively selecting a different set of 16 samplers.
2437 *
2438 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2439 * offset, and each sampler state is only 16-bytes, so we can't
2440 * exclusively use the offset - we have to use both.
2441 */
2442
2443 const struct gen_device_info *devinfo = p->devinfo;
2444
2445 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2446 const int sampler_state_size = 16; /* 16 bytes */
2447 uint32_t sampler = sampler_index.ud;
2448
2449 if (sampler >= 16) {
2450 assert(devinfo->is_haswell || devinfo->gen >= 8);
2451 brw_ADD(p,
2452 get_element_ud(header, 3),
2453 get_element_ud(brw_vec8_grf(0, 0), 3),
2454 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2455 }
2456 } else {
2457 /* Non-const sampler array indexing case */
2458 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2459 return;
2460 }
2461
2462 struct brw_reg temp = get_element_ud(header, 3);
2463
2464 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2465 brw_SHL(p, temp, temp, brw_imm_ud(4));
2466 brw_ADD(p,
2467 get_element_ud(header, 3),
2468 get_element_ud(brw_vec8_grf(0, 0), 3),
2469 temp);
2470 }
2471 }
2472
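/* Worked example: for an immediate sampler_index of 20, the code above adds
 * 16 * (20 / 16) * 16 == 256 bytes to the sampler state pointer, skipping
 * one group of sixteen 16-byte sampler states and leaving the low bits
 * (20 % 16 == 4) for the descriptor's 4-bit sampler index field.
 */
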
2473 /* All these variables are pretty confusing; we might be better off
2474 * using bitmasks and macros for this, in the old style. Or perhaps
2475 * just having the caller instantiate the fields in dword3 itself.
2476 */
2477 void brw_urb_WRITE(struct brw_codegen *p,
2478 struct brw_reg dest,
2479 unsigned msg_reg_nr,
2480 struct brw_reg src0,
2481 enum brw_urb_write_flags flags,
2482 unsigned msg_length,
2483 unsigned response_length,
2484 unsigned offset,
2485 unsigned swizzle)
2486 {
2487 const struct gen_device_info *devinfo = p->devinfo;
2488 brw_inst *insn;
2489
2490 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2491
2492 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2493 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2494 brw_push_insn_state(p);
2495 brw_set_default_access_mode(p, BRW_ALIGN_1);
2496 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2497 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2498 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2499 BRW_REGISTER_TYPE_UD),
2500 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2501 brw_imm_ud(0xff00));
2502 brw_pop_insn_state(p);
2503 }
2504
2505 insn = next_insn(p, BRW_OPCODE_SEND);
2506
2507 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2508
2509 brw_set_dest(p, insn, dest);
2510 brw_set_src0(p, insn, src0);
2511 brw_set_src1(p, insn, brw_imm_d(0));
2512
2513 if (devinfo->gen < 6)
2514 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2515
2516 brw_set_urb_message(p,
2517 insn,
2518 flags,
2519 msg_length,
2520 response_length,
2521 offset,
2522 swizzle);
2523 }
2524
2525 struct brw_inst *
2526 brw_send_indirect_message(struct brw_codegen *p,
2527 unsigned sfid,
2528 struct brw_reg dst,
2529 struct brw_reg payload,
2530 struct brw_reg desc,
2531 unsigned desc_imm)
2532 {
2533 const struct gen_device_info *devinfo = p->devinfo;
2534 struct brw_inst *send;
2535 int setup;
2536
2537 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2538
2539 assert(desc.type == BRW_REGISTER_TYPE_UD);
2540
2541 /* We hold on to the setup instruction (the SEND in the direct case, the OR
2542 * in the indirect case) by its index in the instruction store. The
2543 * pointer returned by next_insn() may become invalid if emitting the SEND
2544 * in the indirect case reallocs the store.
2545 */
2546
2547 if (desc.file == BRW_IMMEDIATE_VALUE) {
2548 setup = p->nr_insn;
2549 send = next_insn(p, BRW_OPCODE_SEND);
2550 brw_set_desc(p, send, desc.ud | desc_imm);
2551
2552 } else {
2553 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2554
2555 brw_push_insn_state(p);
2556 brw_set_default_access_mode(p, BRW_ALIGN_1);
2557 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2558 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2559 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2560
2561 /* Load the indirect descriptor to an address register using OR so the
2562 * caller can specify additional descriptor bits with the usual
2563 * brw_set_*_message() helper functions.
2564 */
2565 setup = p->nr_insn;
2566 brw_OR(p, addr, desc, brw_imm_ud(desc_imm));
2567
2568 brw_pop_insn_state(p);
2569
2570 send = next_insn(p, BRW_OPCODE_SEND);
2571 brw_set_src1(p, send, addr);
2572 }
2573
2574 if (dst.width < BRW_EXECUTE_8)
2575 brw_inst_set_exec_size(devinfo, send, dst.width);
2576
2577 brw_set_dest(p, send, dst);
2578 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2579 brw_inst_set_sfid(devinfo, send, sfid);
2580
2581 return &p->store[setup];
2582 }
2583
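/* Usage sketch (illustrative; desc_reg, mlen and rlen are assumptions):
 * emitting a SEND whose descriptor lives in a GRF -- the helper ORs
 * desc_reg into a0.0 and points src1 at it:
 *
 *    struct brw_inst *send =
 *       brw_send_indirect_message(p, GEN7_SFID_DATAPORT_DATA_CACHE,
 *                                 dst, payload,
 *                                 retype(desc_reg, BRW_REGISTER_TYPE_UD),
 *                                 desc_imm);
 *    brw_inst_set_mlen(devinfo, send, mlen);
 *    brw_inst_set_rlen(devinfo, send, rlen);
 *
 * With an immediate desc, the same call collapses to a direct SEND whose
 * descriptor is desc.ud | desc_imm, set via brw_set_desc().
 */
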
2584 static struct brw_inst *
2585 brw_send_indirect_surface_message(struct brw_codegen *p,
2586 unsigned sfid,
2587 struct brw_reg dst,
2588 struct brw_reg payload,
2589 struct brw_reg surface,
2590 unsigned message_len,
2591 unsigned response_len,
2592 bool header_present)
2593 {
2594 const struct gen_device_info *devinfo = p->devinfo;
2595 struct brw_inst *insn;
2596
2597 if (surface.file != BRW_IMMEDIATE_VALUE) {
2598 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2599
2600 brw_push_insn_state(p);
2601 brw_set_default_access_mode(p, BRW_ALIGN_1);
2602 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2603 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2604 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2605
2606 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2607 * some surface array is accessed out of bounds.
2608 */
2609 insn = brw_AND(p, addr,
2610 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2611 BRW_GET_SWZ(surface.swizzle, 0)),
2612 brw_imm_ud(0xff));
2613
2614 brw_pop_insn_state(p);
2615
2616 surface = addr;
2617 }
2618
2619 insn = brw_send_indirect_message(p, sfid, dst, payload, surface, 0);
2620 brw_inst_set_mlen(devinfo, insn, message_len);
2621 brw_inst_set_rlen(devinfo, insn, response_len);
2622 brw_inst_set_header_present(devinfo, insn, header_present);
2623
2624 return insn;
2625 }
2626
2627 static bool
2628 while_jumps_before_offset(const struct gen_device_info *devinfo,
2629 brw_inst *insn, int while_offset, int start_offset)
2630 {
2631 int scale = 16 / brw_jump_scale(devinfo);
2632 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2633 : brw_inst_jip(devinfo, insn);
2634 assert(jip < 0);
2635 return while_offset + jip * scale <= start_offset;
2636 }
2637
2638
2639 static int
2640 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2641 {
2642 int offset;
2643 void *store = p->store;
2644 const struct gen_device_info *devinfo = p->devinfo;
2645
2646 int depth = 0;
2647
2648 for (offset = next_offset(devinfo, store, start_offset);
2649 offset < p->next_insn_offset;
2650 offset = next_offset(devinfo, store, offset)) {
2651 brw_inst *insn = store + offset;
2652
2653 switch (brw_inst_opcode(devinfo, insn)) {
2654 case BRW_OPCODE_IF:
2655 depth++;
2656 break;
2657 case BRW_OPCODE_ENDIF:
2658 if (depth == 0)
2659 return offset;
2660 depth--;
2661 break;
2662 case BRW_OPCODE_WHILE:
2663 /* If the while doesn't jump before our instruction, it's the end
2664 * of a sibling do...while loop. Ignore it.
2665 */
2666 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2667 continue;
2668 /* fallthrough */
2669 case BRW_OPCODE_ELSE:
2670 case BRW_OPCODE_HALT:
2671 if (depth == 0)
2672 return offset;
2673 }
2674 }
2675
2676 return 0;
2677 }
2678
2679 /* There is no DO instruction on gen6, so to find the end of the loop
2680  * we have to see whether the WHILE jumps back to before our start
2681 * instruction.
2682 */
2683 static int
2684 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2685 {
2686 const struct gen_device_info *devinfo = p->devinfo;
2687 int offset;
2688 void *store = p->store;
2689
2690 assert(devinfo->gen >= 6);
2691
2692 /* Always start after the instruction (such as a WHILE) we're trying to fix
2693 * up.
2694 */
2695 for (offset = next_offset(devinfo, store, start_offset);
2696 offset < p->next_insn_offset;
2697 offset = next_offset(devinfo, store, offset)) {
2698 brw_inst *insn = store + offset;
2699
2700 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2701 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2702 return offset;
2703 }
2704 }
2705 assert(!"not reached");
2706 return start_offset;
2707 }
2708
2709 /* After program generation, go back and update the UIP and JIP of
2710 * BREAK, CONT, and HALT instructions to their correct locations.
2711 */
2712 void
2713 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2714 {
2715 const struct gen_device_info *devinfo = p->devinfo;
2716 int offset;
2717 int br = brw_jump_scale(devinfo);
2718 int scale = 16 / br;
2719 void *store = p->store;
2720
2721 if (devinfo->gen < 6)
2722 return;
2723
2724 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2725 brw_inst *insn = store + offset;
2726 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2727
2728 int block_end_offset = brw_find_next_block_end(p, offset);
2729 switch (brw_inst_opcode(devinfo, insn)) {
2730 case BRW_OPCODE_BREAK:
2731 assert(block_end_offset != 0);
2732 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2733 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2734 brw_inst_set_uip(devinfo, insn,
2735 (brw_find_loop_end(p, offset) - offset +
2736 (devinfo->gen == 6 ? 16 : 0)) / scale);
2737 break;
2738 case BRW_OPCODE_CONTINUE:
2739 assert(block_end_offset != 0);
2740 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2741 brw_inst_set_uip(devinfo, insn,
2742 (brw_find_loop_end(p, offset) - offset) / scale);
2743
2744 assert(brw_inst_uip(devinfo, insn) != 0);
2745 assert(brw_inst_jip(devinfo, insn) != 0);
2746 break;
2747
2748 case BRW_OPCODE_ENDIF: {
2749 int32_t jump = (block_end_offset == 0) ?
2750 1 * br : (block_end_offset - offset) / scale;
2751 if (devinfo->gen >= 7)
2752 brw_inst_set_jip(devinfo, insn, jump);
2753 else
2754 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2755 break;
2756 }
2757
2758 case BRW_OPCODE_HALT:
2759 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2760 *
2761 * "In case of the halt instruction not inside any conditional
2762 * code block, the value of <JIP> and <UIP> should be the
2763 * same. In case of the halt instruction inside conditional code
2764 * block, the <UIP> should be the end of the program, and the
2765 * <JIP> should be end of the most inner conditional code block."
2766 *
2767 * The uip will have already been set by whoever set up the
2768 * instruction.
2769 */
2770 if (block_end_offset == 0) {
2771 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2772 } else {
2773 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2774 }
2775 assert(brw_inst_uip(devinfo, insn) != 0);
2776 assert(brw_inst_jip(devinfo, insn) != 0);
2777 break;
2778 }
2779 }
2780 }
2781
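/* Worked example: on gen7, br == 2 and scale == 16 / 2 == 8.  A BREAK whose
 * block end lies 48 bytes (three uncompacted instructions) ahead gets
 * JIP == 48 / 8 == 6, matching the hardware's 64-bit-chunk branch offsets.
 */
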
2782 void brw_ff_sync(struct brw_codegen *p,
2783 struct brw_reg dest,
2784 unsigned msg_reg_nr,
2785 struct brw_reg src0,
2786 bool allocate,
2787 unsigned response_length,
2788 bool eot)
2789 {
2790 const struct gen_device_info *devinfo = p->devinfo;
2791 brw_inst *insn;
2792
2793 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2794
2795 insn = next_insn(p, BRW_OPCODE_SEND);
2796 brw_set_dest(p, insn, dest);
2797 brw_set_src0(p, insn, src0);
2798 brw_set_src1(p, insn, brw_imm_d(0));
2799
2800 if (devinfo->gen < 6)
2801 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2802
2803 brw_set_ff_sync_message(p,
2804 insn,
2805 allocate,
2806 response_length,
2807 eot);
2808 }
2809
2810 /**
2811 * Emit the SEND instruction necessary to generate stream output data on Gen6
2812 * (for transform feedback).
2813 *
2814 * If send_commit_msg is true, this is the last piece of stream output data
2815 * from this thread, so send the data as a committed write. According to the
2816 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2817 *
2818 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2819 * writes are complete by sending the final write as a committed write."
2820 */
2821 void
2822 brw_svb_write(struct brw_codegen *p,
2823 struct brw_reg dest,
2824 unsigned msg_reg_nr,
2825 struct brw_reg src0,
2826 unsigned binding_table_index,
2827 bool send_commit_msg)
2828 {
2829 const struct gen_device_info *devinfo = p->devinfo;
2830 const unsigned target_cache =
2831 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2832 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2833 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2834 brw_inst *insn;
2835
2836 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2837
2838 insn = next_insn(p, BRW_OPCODE_SEND);
2839 brw_set_dest(p, insn, dest);
2840 brw_set_src0(p, insn, src0);
2841 brw_set_src1(p, insn, brw_imm_d(0));
2842 brw_set_dp_write_message(p, insn,
2843 binding_table_index,
2844 0, /* msg_control: ignored */
2845 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2846 target_cache,
2847 1, /* msg_length */
2848 true, /* header_present */
2849 0, /* last_render_target: ignored */
2850 send_commit_msg, /* response_length */
2851 0, /* end_of_thread */
2852 send_commit_msg); /* send_commit_msg */
2853 }
2854
2855 static unsigned
2856 brw_surface_payload_size(struct brw_codegen *p,
2857 unsigned num_channels,
2858 bool has_simd4x2,
2859 bool has_simd16)
2860 {
2861 if (has_simd4x2 && brw_get_default_access_mode(p) == BRW_ALIGN_16)
2862 return 1;
2863 else if (has_simd16 && brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2864 return 2 * num_channels;
2865 else
2866 return num_channels;
2867 }
2868
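/* Worked example: a SIMD16 Align1 untyped read of num_channels == 4
 * (has_simd16 == true) needs 2 * 4 == 8 response registers; the same read
 * in SIMD4x2 Align16 mode (has_simd4x2 == true) needs only 1.
 */
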
2869 static void
2870 brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
2871 brw_inst *insn,
2872 unsigned atomic_op,
2873 bool response_expected)
2874 {
2875 const struct gen_device_info *devinfo = p->devinfo;
2876 unsigned msg_control =
2877 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2878 (response_expected ? 1 << 5 : 0); /* Return data expected */
2879
2880 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2881 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2882 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2883 msg_control |= 1 << 4; /* SIMD8 mode */
2884
2885 brw_inst_set_dp_msg_type(devinfo, insn,
2886 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2887 } else {
2888 brw_inst_set_dp_msg_type(devinfo, insn,
2889 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2890 }
2891 } else {
2892 brw_inst_set_dp_msg_type(devinfo, insn,
2893 GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2894
2895 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2896 msg_control |= 1 << 4; /* SIMD8 mode */
2897 }
2898
2899 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2900 }
2901
2902 void
2903 brw_untyped_atomic(struct brw_codegen *p,
2904 struct brw_reg dst,
2905 struct brw_reg payload,
2906 struct brw_reg surface,
2907 unsigned atomic_op,
2908 unsigned msg_length,
2909 bool response_expected,
2910 bool header_present)
2911 {
2912 const struct gen_device_info *devinfo = p->devinfo;
2913 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2914 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2915 GEN7_SFID_DATAPORT_DATA_CACHE);
2916 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2917 /* Mask out unused components -- This is especially important in Align16
2918 * mode on generations that don't have native support for SIMD4x2 atomics,
2919 * because unused but enabled components will cause the dataport to perform
2920 * additional atomic operations on the addresses that happen to be in the
2921 * uninitialized Y, Z and W coordinates of the payload.
2922 */
2923 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2924 struct brw_inst *insn = brw_send_indirect_surface_message(
2925 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
2926 brw_surface_payload_size(p, response_expected,
2927 devinfo->gen >= 8 || devinfo->is_haswell, true),
2928 header_present);
2929
2930 brw_set_dp_untyped_atomic_message(
2931 p, insn, atomic_op, response_expected);
2932 }
2933
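/* Usage sketch (illustrative; the surface index and message length are
 * assumptions): a SIMD8 untyped atomic add that returns the old value:
 *
 *    brw_untyped_atomic(p, dst, payload, brw_imm_ud(surf_index),
 *                       BRW_AOP_ADD, 2, true, false);
 *
 * where 2 is msg_length, true requests a response, and false means no
 * header is present.
 */
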
2934 static void
2935 brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
2936 struct brw_inst *insn,
2937 unsigned num_channels)
2938 {
2939 const struct gen_device_info *devinfo = p->devinfo;
2940 /* Set mask of 32-bit channels to drop. */
2941 unsigned msg_control = 0xf & (0xf << num_channels);
2942
2943 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2944 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2945 msg_control |= 1 << 4; /* SIMD16 mode */
2946 else
2947 msg_control |= 2 << 4; /* SIMD8 mode */
2948 }
2949
2950 brw_inst_set_dp_msg_type(devinfo, insn,
2951 (devinfo->gen >= 8 || devinfo->is_haswell ?
2952 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2953 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
2954 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2955 }
2956
2957 void
2958 brw_untyped_surface_read(struct brw_codegen *p,
2959 struct brw_reg dst,
2960 struct brw_reg payload,
2961 struct brw_reg surface,
2962 unsigned msg_length,
2963 unsigned num_channels)
2964 {
2965 const struct gen_device_info *devinfo = p->devinfo;
2966 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2967 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2968 GEN7_SFID_DATAPORT_DATA_CACHE);
2969 struct brw_inst *insn = brw_send_indirect_surface_message(
2970 p, sfid, dst, payload, surface, msg_length,
2971 brw_surface_payload_size(p, num_channels, true, true),
2972 false);
2973
2974 brw_set_dp_untyped_surface_read_message(
2975 p, insn, num_channels);
2976 }
2977
2978 static void
2979 brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
2980 struct brw_inst *insn,
2981 unsigned num_channels)
2982 {
2983 const struct gen_device_info *devinfo = p->devinfo;
2984 /* Set mask of 32-bit channels to drop. */
2985 unsigned msg_control = 0xf & (0xf << num_channels);
2986
2987 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2988 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2989 msg_control |= 1 << 4; /* SIMD16 mode */
2990 else
2991 msg_control |= 2 << 4; /* SIMD8 mode */
2992 } else {
2993 if (devinfo->gen >= 8 || devinfo->is_haswell)
2994 msg_control |= 0 << 4; /* SIMD4x2 mode */
2995 else
2996 msg_control |= 2 << 4; /* SIMD8 mode */
2997 }
2998
2999 brw_inst_set_dp_msg_type(devinfo, insn,
3000 devinfo->gen >= 8 || devinfo->is_haswell ?
3001 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
3002 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
3003 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3004 }
3005
3006 void
3007 brw_untyped_surface_write(struct brw_codegen *p,
3008 struct brw_reg payload,
3009 struct brw_reg surface,
3010 unsigned msg_length,
3011 unsigned num_channels,
3012 bool header_present)
3013 {
3014 const struct gen_device_info *devinfo = p->devinfo;
3015 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3016 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3017 GEN7_SFID_DATAPORT_DATA_CACHE);
3018 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3019 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3020 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3021 WRITEMASK_X : WRITEMASK_XYZW;
3022 struct brw_inst *insn = brw_send_indirect_surface_message(
3023 p, sfid, brw_writemask(brw_null_reg(), mask),
3024 payload, surface, msg_length, 0, header_present);
3025
3026 brw_set_dp_untyped_surface_write_message(
3027 p, insn, num_channels);
3028 }
3029
3030 static unsigned
3031 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
3032 {
3033 switch (bit_size) {
3034 case 8:
3035 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
3036 case 16:
3037 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
3038 case 32:
3039 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
3040 default:
3041 unreachable("Unsupported bit_size for byte scattered messages");
3042 }
3043 }
3044
3045
3046 void
3047 brw_byte_scattered_read(struct brw_codegen *p,
3048 struct brw_reg dst,
3049 struct brw_reg payload,
3050 struct brw_reg surface,
3051 unsigned msg_length,
3052 unsigned bit_size)
3053 {
3054 const struct gen_device_info *devinfo = p->devinfo;
3055 assert(devinfo->gen > 7 || devinfo->is_haswell);
3056 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3057 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
3058
3059 struct brw_inst *insn = brw_send_indirect_surface_message(
3060 p, sfid, dst, payload, surface, msg_length,
3061 brw_surface_payload_size(p, 1, true, true),
3062 false);
3063
3064 unsigned msg_control =
3065 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3066
3067 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
3068 msg_control |= 1; /* SIMD16 mode */
3069 else
3070 msg_control |= 0; /* SIMD8 mode */
3071
3072 brw_inst_set_dp_msg_type(devinfo, insn,
3073 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
3074 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3075 }
3076
3077 void
3078 brw_byte_scattered_write(struct brw_codegen *p,
3079 struct brw_reg payload,
3080 struct brw_reg surface,
3081 unsigned msg_length,
3082 unsigned bit_size,
3083 bool header_present)
3084 {
3085 const struct gen_device_info *devinfo = p->devinfo;
3086 assert(devinfo->gen > 7 || devinfo->is_haswell);
3087 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3088 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
3089
3090 struct brw_inst *insn = brw_send_indirect_surface_message(
3091 p, sfid, brw_writemask(brw_null_reg(), WRITEMASK_XYZW),
3092 payload, surface, msg_length, 0, header_present);
3093
3094 unsigned msg_control =
3095 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3096
3097 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
3098 msg_control |= 1;
3099 else
3100 msg_control |= 0;
3101
3102 brw_inst_set_dp_msg_type(devinfo, insn,
3103 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
3104 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3105 }
3106
3107 static void
3108 brw_set_dp_typed_atomic_message(struct brw_codegen *p,
3109 struct brw_inst *insn,
3110 unsigned atomic_op,
3111 bool response_expected)
3112 {
3113 const struct gen_device_info *devinfo = p->devinfo;
3114 unsigned msg_control =
3115 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3116 (response_expected ? 1 << 5 : 0); /* Return data expected */
3117
3118 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3119 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3120 if ((brw_get_default_group(p) / 8) % 2 == 1)
3121 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3122
3123 brw_inst_set_dp_msg_type(devinfo, insn,
3124 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
3125 } else {
3126 brw_inst_set_dp_msg_type(devinfo, insn,
3127 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
3128 }
3129
3130 } else {
3131 brw_inst_set_dp_msg_type(devinfo, insn,
3132 GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);
3133
3134 if ((brw_get_default_group(p) / 8) % 2 == 1)
3135 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3136 }
3137
3138 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3139 }
3140
3141 void
3142 brw_typed_atomic(struct brw_codegen *p,
3143 struct brw_reg dst,
3144 struct brw_reg payload,
3145 struct brw_reg surface,
3146 unsigned atomic_op,
3147 unsigned msg_length,
3148 bool response_expected,
3149 bool header_present) {
3150 const struct gen_device_info *devinfo = p->devinfo;
3151 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3152 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3153 GEN6_SFID_DATAPORT_RENDER_CACHE);
3154 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3155 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3156 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3157 struct brw_inst *insn = brw_send_indirect_surface_message(
3158 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
3159 brw_surface_payload_size(p, response_expected,
3160 devinfo->gen >= 8 || devinfo->is_haswell, false),
3161 header_present);
3162
3163 brw_set_dp_typed_atomic_message(
3164 p, insn, atomic_op, response_expected);
3165 }
3166
3167 static void
3168 brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
3169 struct brw_inst *insn,
3170 unsigned num_channels)
3171 {
3172 const struct gen_device_info *devinfo = p->devinfo;
3173 /* Set mask of unused channels. */
3174 unsigned msg_control = 0xf & (0xf << num_channels);
3175
3176 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3177 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3178 if ((brw_get_default_group(p) / 8) % 2 == 1)
3179 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3180 else
3181 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3182 }
3183
3184 brw_inst_set_dp_msg_type(devinfo, insn,
3185 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
3186 } else {
3187 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3188 if ((brw_get_default_group(p) / 8) % 2 == 1)
3189 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3190 }
3191
3192 brw_inst_set_dp_msg_type(devinfo, insn,
3193 GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
3194 }
3195
3196 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3197 }
3198
3199 void
3200 brw_typed_surface_read(struct brw_codegen *p,
3201 struct brw_reg dst,
3202 struct brw_reg payload,
3203 struct brw_reg surface,
3204 unsigned msg_length,
3205 unsigned num_channels,
3206 bool header_present)
3207 {
3208 const struct gen_device_info *devinfo = p->devinfo;
3209 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3210 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3211 GEN6_SFID_DATAPORT_RENDER_CACHE);
3212 struct brw_inst *insn = brw_send_indirect_surface_message(
3213 p, sfid, dst, payload, surface, msg_length,
3214 brw_surface_payload_size(p, num_channels,
3215 devinfo->gen >= 8 || devinfo->is_haswell, false),
3216 header_present);
3217
3218 brw_set_dp_typed_surface_read_message(
3219 p, insn, num_channels);
3220 }
3221
3222 static void
3223 brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
3224 struct brw_inst *insn,
3225 unsigned num_channels)
3226 {
3227 const struct gen_device_info *devinfo = p->devinfo;
3228 /* Set mask of unused channels. */
3229 unsigned msg_control = 0xf & (0xf << num_channels);
3230
3231 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3232 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3233 if ((brw_get_default_group(p) / 8) % 2 == 1)
3234 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3235 else
3236 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3237 }
3238
3239 brw_inst_set_dp_msg_type(devinfo, insn,
3240 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);
3241
3242 } else {
3243 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3244 if ((brw_get_default_group(p) / 8) % 2 == 1)
3245 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3246 }
3247
3248 brw_inst_set_dp_msg_type(devinfo, insn,
3249 GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
3250 }
3251
3252 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3253 }
3254
3255 void
3256 brw_typed_surface_write(struct brw_codegen *p,
3257 struct brw_reg payload,
3258 struct brw_reg surface,
3259 unsigned msg_length,
3260 unsigned num_channels,
3261 bool header_present)
3262 {
3263 const struct gen_device_info *devinfo = p->devinfo;
3264 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3265 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3266 GEN6_SFID_DATAPORT_RENDER_CACHE);
3267 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3268 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3269 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3270 WRITEMASK_X : WRITEMASK_XYZW);
3271 struct brw_inst *insn = brw_send_indirect_surface_message(
3272 p, sfid, brw_writemask(brw_null_reg(), mask),
3273 payload, surface, msg_length, 0, header_present);
3274
3275 brw_set_dp_typed_surface_write_message(
3276 p, insn, num_channels);
3277 }

static void
brw_set_memory_fence_message(struct brw_codegen *p,
                             struct brw_inst *insn,
                             enum brw_message_target sfid,
                             bool commit_enable)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, (commit_enable ? 1 : 0), true));

   brw_inst_set_sfid(devinfo, insn, sfid);

   switch (sfid) {
   case GEN6_SFID_DATAPORT_RENDER_CACHE:
      brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
      break;
   case GEN7_SFID_DATAPORT_DATA_CACHE:
      brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
      break;
   default:
      unreachable("Not reached");
   }

   if (commit_enable)
      brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
}

void
brw_memory_fence(struct brw_codegen *p,
                 struct brw_reg dst,
                 enum opcode send_op)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool commit_enable =
      devinfo->gen >= 10 || /* HSD ES # 1404612949 */
      (devinfo->gen == 7 && !devinfo->is_haswell);
   struct brw_inst *insn;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, BRW_EXECUTE_1);
   dst = vec1(dst);

   /* Set dst as the destination for dependency tracking; the MEMORY_FENCE
    * message doesn't write anything back.
    */
   insn = next_insn(p, send_op);
   dst = retype(dst, BRW_REGISTER_TYPE_UW);
   brw_set_dest(p, insn, dst);
   brw_set_src0(p, insn, dst);
   brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
                                commit_enable);

   if (devinfo->gen == 7 && !devinfo->is_haswell) {
      /* IVB does typed surface access through the render cache, so we need to
       * flush it too. Use a different register so both flushes can be
       * pipelined by the hardware.
       */
      insn = next_insn(p, send_op);
      brw_set_dest(p, insn, offset(dst, 1));
      brw_set_src0(p, insn, offset(dst, 1));
      brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
                                   commit_enable);

      /* Now write the response of the second message into the response of the
       * first to trigger a pipeline stall -- this way future render and data
       * cache messages will be properly ordered with respect to past data and
       * render cache messages.
       */
      brw_MOV(p, dst, offset(dst, 1));
   }

   brw_pop_insn_state(p);
}
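
/* Usage sketch (the register choice is hypothetical): emit a fence through a
 * plain SEND, tracking the dependency (and, where enabled, the commit
 * return) in a scratch GRF:
 *
 *    brw_memory_fence(p, brw_vec1_reg(BRW_GENERAL_REGISTER_FILE, 127, 0),
 *                     BRW_OPCODE_SEND);
 *
 * On IVB this expands to two fences (data and render cache) plus a MOV that
 * stalls until both have been committed.
 */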

void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;
   const uint16_t exec_size = brw_get_default_exec_size(p);
   const uint16_t qtr_ctrl = brw_get_default_group(p) / 8;

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually immediate.
    */
   insn = brw_send_indirect_message(p,
                                    GEN7_SFID_PIXEL_INTERPOLATOR,
                                    dest,
                                    mrf,
                                    vec1(data), 0);
   brw_inst_set_mlen(devinfo, insn, msg_length);
   brw_inst_set_rlen(devinfo, insn, response_length);

   brw_inst_set_pi_simd_mode(devinfo, insn, exec_size == BRW_EXECUTE_16);
   brw_inst_set_pi_slot_group(devinfo, insn, qtr_ctrl / 2);
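   /* E.g. a default group of 16 gives qtr_ctrl == 2 and slot group 1,
    * selecting the high 16 slots of a SIMD32 dispatch.
    */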
   brw_inst_set_pi_nopersp(devinfo, insn, noperspective);
   brw_inst_set_pi_message_type(devinfo, insn, mode);
}

void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_get_default_exec_size(p);
   const unsigned qtr_control = brw_get_default_group(p) / 8;
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just find
          * the first bit set in the execution mask. The register exists on
          * HSW already but it reads back as all ones when the current
          * instruction has execution masking disabled, so it's kind of
          * useless.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's not
             * tightly packed (i.e. it doesn't have the form '2^n - 1' for
             * some n). Combine ce0 with the given dispatch (or vector) mask
             * to mask off those channels which were never dispatched by the
             * hardware.
             */
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }

         /* Quarter control has the effect of magically shifting the value of
          * ce0 so you'll get the first active channel relative to the
          * specified quarter control as the result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
      } else {
         const struct brw_reg flag = brw_flag_reg(p->current->flag_subreg / 2,
                                                  p->current->flag_subreg % 2);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking and
          * a conditional modifier enabled in order to get the full execution
          * mask in f1.0. We could use a single 32-wide move here if it
          * weren't for the hardware bug that causes channel enables to be
          * applied incorrectly to the second half of 32-wide instructions on
          * Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }
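
         /* Each MOV writes zero with the Z conditional modifier, so every
          * enabled channel compares equal and sets its flag bit, while
          * disabled channels leave their zero-initialized bit alone; the
          * flag ends up holding the execution mask.
          */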

         /* Find the first bit set in the exec_size-wide portion of the flag
          * register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register. Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination without and with execution masking to
          * find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job, but
       * asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
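      /* E.g. idx == 2 reads src.2<0;1,0> (a scalar region pointing at
       * component 2) in align1 mode, or the third vec4 of src in align16
       * mode.
       */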
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       * "The lower bits of the AddressImmediate must not overflow to
       * change the register address. The lower 5 bits of Address
       * Immediate when added to lower 5 bits of address register gives
       * the sub-register offset. The upper bits of Address Immediate
       * when added to upper bits of address register gives the register
       * address. Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));
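         /* E.g. a 4-byte component with hstride 1 gives a shift count of
          * log2(4) + 1 - 1 == 2, so addr ends up holding idx * 4, the byte
          * offset of the selected component.
          */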

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, account for the difference if the source
          * register is above this limit.
          */
         if (offset >= limit) {
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }
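         /* E.g. a source at r20 gives offset == 640: the ADD folds the
          * 512-byte-aligned part into the address register and the
          * remaining 128 bytes stay in the immediate.
          */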

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             * "When source or destination datatype is 64b or operation is
             * integer DWord multiply, indirect addressing must not be
             * used."
             *
             * To work around this restriction, we do two integer MOVs
             * instead of one 64-bit MOV. Because no double value should ever
             * cross a register boundary, it's safe to use the immediate
             * offset in the indirect here to handle adding 4 bytes to the
             * offset and avoid the extra ADD to the register file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_vec1_indirect(addr.subnr, offset),
                           BRW_REGISTER_TYPE_D));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_vec1_indirect(addr.subnr, offset + 4),
                           BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can only be zero or one; replicate it
          * to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword for
 * all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we just want to
 * write one u32. So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_src1(p, send, brw_imm_ud(0));
   brw_set_desc(p, send, brw_message_desc(devinfo, 2, 0, false));
   brw_inst_set_sfid(devinfo, send, sfid);
   brw_inst_set_binding_table_index(devinfo, send, surf_index);
   brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);

   brw_pop_insn_state(p);
}


/**
 * Emit the SEND message for a barrier
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());
   brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}
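
/* Typical pairing (a sketch; assumes the caller has already loaded the
 * gateway message header into a GRF named payload):
 *
 *    brw_barrier(p, payload);   signal arrival at the barrier
 *    brw_WAIT(p);               sleep until the barrier is released
 */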

/**
 * Changes the floating-point rounding mode by updating the control register
 * field defined at the cr0.0[5-6] bits. This function supports changing to
 * RTNE (00), RU (01), RD (10) and RTZ (11) rounding using bitwise
 * operations. Only RTNE and RTZ rounding are enabled in NIR.
 */
void
brw_rounding_mode(struct brw_codegen *p,
                  enum brw_rnd_mode mode)
{
   const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;

   if (bits != BRW_CR0_RND_MODE_MASK) {
      brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                               brw_imm_ud(~BRW_CR0_RND_MODE_MASK));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);

      /* From the Skylake PRM, Volume 7, page 760:
       * "Implementation Restriction on Register Access: When the control
       * register is used as an explicit source and/or destination, hardware
       * does not ensure execution pipeline coherency. Software must set the
       * thread control field to ‘switch’ for an instruction that uses
       * control register as an explicit operand."
       */
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }

   if (bits) {
      brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                              brw_imm_ud(bits));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }
}
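
/* Worked example: switching to RTZ (bits == 11b) skips the AND, since
 * bits == BRW_CR0_RND_MODE_MASK, and simply ORs both bits in; switching to
 * RTNE (bits == 00b) skips the OR and lets the AND clear the field. RU and
 * RD need both instructions.
 */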