TRIVIAL: intel/eu: Use a local devinfo variable in brw_shader_time_add().
mesa.git: src/intel/compiler/brw_eu_emit.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"
/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
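
/* A minimal usage sketch (illustrative only; the register numbers here are
 * hypothetical and the real call sites live in the vec4/fs generators):
 *
 *    struct brw_reg payload = brw_vec8_grf(2, 0);
 *    gen6_resolve_implied_move(p, &payload, 1);
 *    // On gen6+, the payload has now been copied to m1 and rewritten to
 *    // name it, so a following SEND reads its message contents from there.
 */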

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
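
/* Assuming GEN7_MRF_HACK_START is 112 (the bottom of the R112-R127 range
 * quoted above), the remapping is a simple offset, e.g.:
 *
 *    m4 (MRF file, nr = 4)  ->  g116 (GRF file, nr = 4 + 112)
 */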

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_file_type(devinfo, inst, dest.file, dest.type);
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          *    "Although Dst.HorzStride is a don't care for Align16, HW needs
          *     this to be programmed as '01'."
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Even though this is ignored in da16 mode, it still needs to be
          * set to '01'.
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, it can be useful for us to automatically reduce it to
    * match the register size.
    */
   if (p->automatic_exec_sizes) {
      /*
       * On platforms that support fp64, we can emit instructions with a width
       * of 4 that need two SIMD8 registers and an exec_size of 8 or 16.  In
       * these cases we need to make sure that these instructions have their
       * exec sizes set properly when they are emitted, and we can't rely on
       * this code to fix it.
       */
      bool fix_exec_size;
      if (devinfo->gen >= 6)
         fix_exec_size = dest.width < BRW_EXECUTE_4;
      else
         fix_exec_size = dest.width < BRW_EXECUTE_8;

      if (fix_exec_size)
         brw_inst_set_exec_size(devinfo, inst, dest.width);
   }
}
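
/* For instance, assuming the usual log2 width encodings (BRW_EXECUTE_1 == 0,
 * BRW_EXECUTE_4 == 2, BRW_EXECUTE_8 == 3), a gen6+ instruction whose
 * destination is a scalar (width 1) gets its exec_size silently reduced from
 * the default SIMD8/SIMD16 down to 1 by the code above, while anything of
 * width 4 or more is left alone.
 */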

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   brw_inst_set_src0_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_hw_type(devinfo, inst,
                                       brw_inst_src0_reg_hw_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact that we're using the same
             * register descriptions in align_16 as in align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From the SNB PRM:
             *
             *    "For Align16 access mode, only encodings of 0000 and 0011
             *     are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *     operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   brw_inst_set_src1_file_type(devinfo, inst, reg.file, reg.type);
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be an immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert(reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact that we're using the same
             * register descriptions in align_16 as in align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From the SNB PRM:
             *
             *    "For Align16 access mode, only encodings of 0000 and 0011
             *     are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Specify the descriptor and extended descriptor immediate for a SEND(C)
 * message instruction.
 */
void
brw_set_desc_ex(struct brw_codegen *p, brw_inst *inst,
                unsigned desc, unsigned ex_desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst_set_src1_file_type(devinfo, inst,
                               BRW_IMMEDIATE_VALUE, BRW_REGISTER_TYPE_D);
   brw_inst_set_send_desc(devinfo, inst, desc);
   if (devinfo->gen >= 9 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC))
      brw_inst_set_send_ex_desc(devinfo, inst, ex_desc);
}
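
/* A sketch of typical usage, as seen throughout this file: descriptors are
 * normally built with brw_message_desc() rather than by hand, and the common
 * no-extended-descriptor case goes through the brw_set_desc() wrapper, which
 * amounts to passing ex_desc = 0:
 *
 *    brw_set_desc_ex(p, send_inst,
 *                    brw_message_desc(devinfo, 2, 1, true),  // mlen, rlen, header
 *                    0);                                     // no extended desc
 */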

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MATH);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}
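
/* In other words, the message/response footprint follows directly from the
 * operand and result counts of the math function: POW is binary, so it sends
 * two registers (msg_length = 2) and returns one (response_length = 1);
 * INT_DIV_QUOTIENT_AND_REMAINDER is binary with two results (2 and 2); and a
 * unary function such as BRW_MATH_FUNCTION_INV or _SQRT uses 1 and 1.
 */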


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, true));

   brw_inst_set_sfid(devinfo, insn, BRW_SFID_URB);
   brw_inst_set_eot(devinfo, insn, !!(flags & BRW_URB_WRITE_EOT));

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

void
brw_set_dp_write_message(struct brw_codegen *p,
                         brw_inst *insn,
                         unsigned binding_table_index,
                         unsigned msg_control,
                         unsigned msg_type,
                         unsigned target_cache,
                         unsigned msg_length,
                         bool header_present,
                         unsigned last_render_target,
                         unsigned response_length,
                         unsigned end_of_thread,
                         unsigned send_commit_msg)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_WRITE);

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, header_present));

   brw_inst_set_sfid(devinfo, insn, sfid);
   brw_inst_set_eot(devinfo, insn, !!end_of_thread);
   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_write_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_write_msg_control(devinfo, insn, msg_control);
   brw_inst_set_rt_last(devinfo, insn, last_render_target);
   if (devinfo->gen < 7) {
      brw_inst_set_dp_write_commit(devinfo, insn, send_commit_msg);
   }

   if (devinfo->gen >= 11)
      brw_inst_set_null_rt(devinfo, insn, false);
}

void
brw_set_dp_read_message(struct brw_codegen *p,
                        brw_inst *insn,
                        unsigned binding_table_index,
                        unsigned msg_control,
                        unsigned msg_type,
                        unsigned target_cache,
                        unsigned msg_length,
                        bool header_present,
                        unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_READ);

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, msg_length, response_length, header_present));

   const unsigned opcode = brw_inst_opcode(devinfo, insn);
   if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC)
      brw_inst_set_sfid(devinfo, insn, sfid);
   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_read_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_read_msg_control(devinfo, insn, msg_control);
   if (devinfo->gen < 6)
      brw_inst_set_dp_read_target_cache(devinfo, insn, target_cache);
}

void
brw_set_sampler_message(struct brw_codegen *p,
                        brw_inst *inst,
                        unsigned binding_table_index,
                        unsigned sampler,
                        unsigned msg_type,
                        unsigned response_length,
                        unsigned msg_length,
                        unsigned header_present,
                        unsigned simd_mode,
                        unsigned return_format)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, msg_length, response_length, header_present));

   const unsigned opcode = brw_inst_opcode(devinfo, inst);
   if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC)
      brw_inst_set_sfid(devinfo, inst, BRW_SFID_SAMPLER);
   brw_inst_set_binding_table_index(devinfo, inst, binding_table_index);
   brw_inst_set_sampler(devinfo, inst, sampler);
   brw_inst_set_sampler_msg_type(devinfo, inst, msg_type);
   if (devinfo->gen >= 5) {
      brw_inst_set_sampler_simd_mode(devinfo, inst, simd_mode);
   } else if (devinfo->gen == 4 && !devinfo->is_g4x) {
      brw_inst_set_sampler_return_format(devinfo, inst, return_format);
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_desc(p, inst, brw_message_desc(
                   devinfo, mlen, rlen, header_present));

   brw_inst_set_sfid(devinfo, inst, GEN7_SFID_DATAPORT_DATA_CACHE);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
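
/* Note that the block-size field is encoded differently before and after
 * gen8: a 4-register block is encoded as 4 - 1 = 3 on gen7, but as
 * log2(4) = 2 on gen8+, which is also what makes the 8-register case
 * representable there.
 */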

static void
brw_inst_set_state(const struct gen_device_info *devinfo,
                   brw_inst *insn,
                   const struct brw_insn_state *state)
{
   brw_inst_set_exec_size(devinfo, insn, state->exec_size);
   brw_inst_set_group(devinfo, insn, state->group);
   brw_inst_set_compression(devinfo, insn, state->compressed);
   brw_inst_set_access_mode(devinfo, insn, state->access_mode);
   brw_inst_set_mask_control(devinfo, insn, state->mask_control);
   brw_inst_set_saturate(devinfo, insn, state->saturate);
   brw_inst_set_pred_control(devinfo, insn, state->predicate);
   brw_inst_set_pred_inv(devinfo, insn, state->pred_inv);

   if (is_3src(devinfo, brw_inst_opcode(devinfo, insn)) &&
       state->access_mode == BRW_ALIGN_16) {
      brw_inst_set_3src_a16_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_3src_a16_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   } else {
      brw_inst_set_flag_subreg_nr(devinfo, insn, state->flag_subreg % 2);
      if (devinfo->gen >= 7)
         brw_inst_set_flag_reg_nr(devinfo, insn, state->flag_subreg / 2);
   }

   if (devinfo->gen >= 6)
      brw_inst_set_acc_wr_control(devinfo, insn, state->acc_wr_control);
}

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];

   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(devinfo, insn, opcode);

   /* Apply the default instruction state */
   brw_inst_set_state(devinfo, insn, p->current);

   return insn;
}

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
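
/* For example, a subregister byte offset of 12 in a 32-byte register
 * corresponds to 32-bit component number 12 / 4 = 3.
 */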

static enum gen10_align1_3src_vertical_stride
to_3src_align1_vstride(enum brw_vertical_stride vstride)
{
   switch (vstride) {
   case BRW_VERTICAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_0;
   case BRW_VERTICAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_2;
   case BRW_VERTICAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_4;
   case BRW_VERTICAL_STRIDE_8:
   case BRW_VERTICAL_STRIDE_16:
      return BRW_ALIGN1_3SRC_VERTICAL_STRIDE_8;
   default:
      unreachable("invalid vstride");
   }
}


static enum gen10_align1_3src_src_horizontal_stride
to_3src_align1_hstride(enum brw_horizontal_stride hstride)
{
   switch (hstride) {
   case BRW_HORIZONTAL_STRIDE_0:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_0;
   case BRW_HORIZONTAL_STRIDE_1:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_1;
   case BRW_HORIZONTAL_STRIDE_2:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_2;
   case BRW_HORIZONTAL_STRIDE_4:
      return BRW_ALIGN1_3SRC_SRC_HORIZONTAL_STRIDE_4;
   default:
      unreachable("invalid hstride");
   }
}

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(dest.nr < 128);
   assert(src0.nr < 128);
   assert(src1.nr < 128);
   assert(src2.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);

   if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_ARCHITECTURE_REGISTER_FILE);

      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_ACCUMULATOR);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_a1_dst_reg_file(devinfo, inst,
                                           BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE);
         brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      }
      brw_inst_set_3src_a1_dst_subreg_nr(devinfo, inst, dest.subnr / 8);

      brw_inst_set_3src_a1_dst_hstride(devinfo, inst, BRW_ALIGN1_3SRC_DST_HORIZONTAL_STRIDE_1);

      if (brw_reg_type_is_floating_point(dest.type)) {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_FLOAT);
      } else {
         brw_inst_set_3src_a1_exec_type(devinfo, inst,
                                        BRW_ALIGN1_3SRC_EXEC_TYPE_INT);
      }

      brw_inst_set_3src_a1_dst_type(devinfo, inst, dest.type);
      brw_inst_set_3src_a1_src0_type(devinfo, inst, src0.type);
      brw_inst_set_3src_a1_src1_type(devinfo, inst, src1.type);
      brw_inst_set_3src_a1_src2_type(devinfo, inst, src2.type);

      brw_inst_set_3src_a1_src0_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src0.vstride));
      brw_inst_set_3src_a1_src1_vstride(devinfo, inst,
                                        to_3src_align1_vstride(src1.vstride));
      /* no vstride on src2 */

      brw_inst_set_3src_a1_src0_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src0.hstride));
      brw_inst_set_3src_a1_src1_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src1.hstride));
      brw_inst_set_3src_a1_src2_hstride(devinfo, inst,
                                        to_3src_align1_hstride(src2.hstride));

      brw_inst_set_3src_a1_src0_subreg_nr(devinfo, inst, src0.subnr);
      if (src0.type == BRW_REGISTER_TYPE_NF) {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      }
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);

      brw_inst_set_3src_a1_src1_subreg_nr(devinfo, inst, src1.subnr);
      if (src1.file == BRW_ARCHITECTURE_REGISTER_FILE) {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, BRW_ARF_ACCUMULATOR);
      } else {
         brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      }
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);

      brw_inst_set_3src_a1_src2_subreg_nr(devinfo, inst, src2.subnr);
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
             src0.file == BRW_IMMEDIATE_VALUE ||
             (src0.file == BRW_ARCHITECTURE_REGISTER_FILE &&
              src0.type == BRW_REGISTER_TYPE_NF));
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             src1.file == BRW_ARCHITECTURE_REGISTER_FILE);
      assert(src2.file == BRW_GENERAL_REGISTER_FILE ||
             src2.file == BRW_IMMEDIATE_VALUE);

      brw_inst_set_3src_a1_src0_reg_file(devinfo, inst,
                                         src0.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
      brw_inst_set_3src_a1_src1_reg_file(devinfo, inst,
                                         src1.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_ACCUMULATOR);
      brw_inst_set_3src_a1_src2_reg_file(devinfo, inst,
                                         src2.file == BRW_GENERAL_REGISTER_FILE ?
                                         BRW_ALIGN1_3SRC_GENERAL_REGISTER_FILE :
                                         BRW_ALIGN1_3SRC_IMMEDIATE_VALUE);
   } else {
      assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE);
      assert(dest.type == BRW_REGISTER_TYPE_F ||
             dest.type == BRW_REGISTER_TYPE_DF ||
             dest.type == BRW_REGISTER_TYPE_D ||
             dest.type == BRW_REGISTER_TYPE_UD);
      if (devinfo->gen == 6) {
         brw_inst_set_3src_a16_dst_reg_file(devinfo, inst,
                                            dest.file == BRW_MESSAGE_REGISTER_FILE);
      }
      brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
      brw_inst_set_3src_a16_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
      brw_inst_set_3src_a16_dst_writemask(devinfo, inst, dest.writemask);

      assert(src0.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src0_swizzle(devinfo, inst, src0.swizzle);
      brw_inst_set_3src_a16_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
      brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
      brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
      brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
      brw_inst_set_3src_a16_src0_rep_ctrl(devinfo, inst,
                                          src0.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src1.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src1_swizzle(devinfo, inst, src1.swizzle);
      brw_inst_set_3src_a16_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
      brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
      brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
      brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
      brw_inst_set_3src_a16_src1_rep_ctrl(devinfo, inst,
                                          src1.vstride == BRW_VERTICAL_STRIDE_0);

      assert(src2.file == BRW_GENERAL_REGISTER_FILE);
      brw_inst_set_3src_a16_src2_swizzle(devinfo, inst, src2.swizzle);
      brw_inst_set_3src_a16_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
      brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
      brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
      brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
      brw_inst_set_3src_a16_src2_rep_ctrl(devinfo, inst,
                                          src2.vstride == BRW_VERTICAL_STRIDE_0);

      if (devinfo->gen >= 7) {
         /* Set both the source and destination types based on dest.type,
          * ignoring the source register types.  The MAD and LRP emitters ensure
          * that all four types are float.  The BFE and BFI2 emitters, however,
          * may send us mixed D and UD types and want us to ignore that and use
          * the destination type.
          */
         brw_inst_set_3src_a16_src_type(devinfo, inst, dest.type);
         brw_inst_set_3src_a16_dst_type(devinfo, inst, dest.type);
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0)                     \
{                                                           \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);         \
}

#define ALU2(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0,                     \
                   struct brw_reg src1)                     \
{                                                           \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);   \
}

#define ALU3(OP)                                                  \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

#define ALU3F(OP)                                                 \
brw_inst *brw_##OP(struct brw_codegen *p,                         \
                   struct brw_reg dest,                           \
                   struct brw_reg src0,                           \
                   struct brw_reg src1,                           \
                   struct brw_reg src2)                           \
{                                                                 \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                     \
          dest.type == BRW_REGISTER_TYPE_DF);                     \
   if (dest.type == BRW_REGISTER_TYPE_F) {                        \
      assert(src0.type == BRW_REGISTER_TYPE_F);                   \
      assert(src1.type == BRW_REGISTER_TYPE_F);                   \
      assert(src2.type == BRW_REGISTER_TYPE_F);                   \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {                \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                  \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                  \
   }                                                              \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);   \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                    \
void brw_##OP(struct brw_codegen *p,                                 \
              struct brw_reg dest,                                   \
              struct brw_reg src)                                    \
{                                                                    \
   const struct gen_device_info *devinfo = p->devinfo;               \
   brw_inst *rnd, *add;                                              \
   rnd = next_insn(p, BRW_OPCODE_##OP);                              \
   brw_set_dest(p, rnd, dest);                                       \
   brw_set_src0(p, rnd, src);                                        \
                                                                     \
   if (devinfo->gen < 6) {                                           \
      /* turn on round-increments */                                 \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R);   \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));                 \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
   }                                                                 \
}
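
/* So on gen4/5, brw_RNDZ(p, dst, src) expands to roughly this two-instruction
 * sequence (a sketch, not literal disassembly):
 *
 *    rndz.r dst, src           // round; flag gets per-channel increment bits
 *    (+f0)  add dst, dst, 1.0  // predicated fixup where the bit was set
 *
 * while on gen6+ only the first instruction is emitted.
 */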


ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU3(CSEL)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_get_default_access_mode(p) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
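
/* That is, a source that would normally be read as <4,4,1> (elements
 * 0 1 2 3 ...) is instead read as <1,2,0>, which yields the sequence
 * 0 0 1 1 2 2 ..., so every 32-bit element still reaches an even channel
 * even though the odd channels are discarded by the F->DF conversion.
 */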

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}
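
/* The zero-fill trick above works by viewing each 32-bit UD destination
 * channel as a pair of 16-bit W channels with a stride of 2: the conversion
 * writes the low halves, and a second MOV of immediate 0 at suboffset 1
 * clears the high halves, with the NoDDClr/NoDDChk pair suppressing the
 * false dependency between the two writes to disjoint halves of the same
 * registers.
 */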

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_get_default_access_mode(p) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *    "Because this instruction does not have a 16-bit floating-point
       *     type, the source data type must be Word (W). The destination type
       *     must be F (Float)."
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}


/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_1);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* The EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, e.g. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}
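
/* A minimal sketch of the emission pattern the stack machinery expects
 * (illustrative; real generators drive this from their IR):
 *
 *    brw_IF(p, BRW_EXECUTE_8);    // pushed onto p->if_stack
 *    ...emit the then-block...
 *    brw_ELSE(p);                 // also pushed onto p->if_stack
 *    ...emit the else-block...
 *    brw_ENDIF(p);                // pops both and patches the jump targets
 */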

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}
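
/* The factor of 16 is just the size of a native instruction in bytes, so,
 * for example, an IF whose ELSE sits 3 instructions later becomes, in
 * effect:
 *
 *    (-f0)  add ip, ip, 64   // (3 + 1) * 16 bytes, landing just past ELSE
 *
 * i.e. a predicated jump over the then-block, with the predicate inverted
 * relative to the original IF.
 */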

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br * (endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP.  On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /*
    * A single next_insn() may change the base address of the instruction
    * store memory (p->store), so call it first, before converting the stored
    * indices back into instruction pointers.
    */
1577 if (emit_endif)
1578 insn = next_insn(p, BRW_OPCODE_ENDIF);
1579
1580 /* Pop the IF and (optional) ELSE instructions from the stack */
1581 p->if_depth_in_loop[p->loop_stack_depth]--;
1582 tmp = pop_if_stack(p);
1583 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1584 else_inst = tmp;
1585 tmp = pop_if_stack(p);
1586 }
1587 if_inst = tmp;
1588
1589 if (!emit_endif) {
1590 /* ENDIF is useless; don't bother emitting it. */
1591 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1592 return;
1593 }
1594
1595 if (devinfo->gen < 6) {
1596 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1597 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1598 brw_set_src1(p, insn, brw_imm_d(0x0));
1599 } else if (devinfo->gen == 6) {
1600 brw_set_dest(p, insn, brw_imm_w(0));
1601 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1602 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1603 } else if (devinfo->gen == 7) {
1604 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1605 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1606 brw_set_src1(p, insn, brw_imm_w(0));
1607 } else {
1608 brw_set_src0(p, insn, brw_imm_d(0));
1609 }
1610
1611 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1612 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1613 if (devinfo->gen < 6)
1614 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1615
1616 /* Also pop an item off the stack in the ENDIF instruction: */
1617 if (devinfo->gen < 6) {
1618 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1619 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1620 } else if (devinfo->gen == 6) {
1621 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1622 } else {
1623 brw_inst_set_jip(devinfo, insn, 2);
1624 }
1625 patch_IF_ELSE(p, if_inst, else_inst, insn);
1626 }
1627
1628 brw_inst *
1629 brw_BREAK(struct brw_codegen *p)
1630 {
1631 const struct gen_device_info *devinfo = p->devinfo;
1632 brw_inst *insn;
1633
1634 insn = next_insn(p, BRW_OPCODE_BREAK);
1635 if (devinfo->gen >= 8) {
1636 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1637 brw_set_src0(p, insn, brw_imm_d(0x0));
1638 } else if (devinfo->gen >= 6) {
1639 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1640 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1641 brw_set_src1(p, insn, brw_imm_d(0x0));
1642 } else {
1643 brw_set_dest(p, insn, brw_ip_reg());
1644 brw_set_src0(p, insn, brw_ip_reg());
1645 brw_set_src1(p, insn, brw_imm_d(0x0));
1646 brw_inst_set_gen4_pop_count(devinfo, insn,
1647 p->if_depth_in_loop[p->loop_stack_depth]);
1648 }
1649 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1650 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1651
1652 return insn;
1653 }
1654
1655 brw_inst *
1656 brw_CONT(struct brw_codegen *p)
1657 {
1658 const struct gen_device_info *devinfo = p->devinfo;
1659 brw_inst *insn;
1660
1661 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1662 brw_set_dest(p, insn, brw_ip_reg());
1663 if (devinfo->gen >= 8) {
1664 brw_set_src0(p, insn, brw_imm_d(0x0));
1665 } else {
1666 brw_set_src0(p, insn, brw_ip_reg());
1667 brw_set_src1(p, insn, brw_imm_d(0x0));
1668 }
1669
1670 if (devinfo->gen < 6) {
1671 brw_inst_set_gen4_pop_count(devinfo, insn,
1672 p->if_depth_in_loop[p->loop_stack_depth]);
1673 }
1674 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1675 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1676 return insn;
1677 }
1678
1679 brw_inst *
1680 gen6_HALT(struct brw_codegen *p)
1681 {
1682 const struct gen_device_info *devinfo = p->devinfo;
1683 brw_inst *insn;
1684
1685 insn = next_insn(p, BRW_OPCODE_HALT);
1686 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1687 if (devinfo->gen >= 8) {
1688 brw_set_src0(p, insn, brw_imm_d(0x0));
1689 } else {
1690 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1691 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1692 }
1693
1694 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1695 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1696 return insn;
1697 }
1698
1699 /* DO/WHILE loop:
1700 *
1701 * The DO/WHILE is just an unterminated loop -- break or continue are
1702 * used for control within the loop. There are a few ways this can be
1703 * done.
1704 *
1705 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1706 * jip and no DO instruction.
1707 *
1708 * For non-uniform control flow pre-gen6, there's a DO instruction to
1709 * push the mask, and a WHILE to jump back, and BREAK to get out and
1710 * pop the mask.
1711 *
1712 * For gen6, there's no more mask stack, so no need for DO. WHILE
1713 * just points back to the first instruction of the loop.
1714 */
1715 brw_inst *
1716 brw_DO(struct brw_codegen *p, unsigned execute_size)
1717 {
1718 const struct gen_device_info *devinfo = p->devinfo;
1719
1720 if (devinfo->gen >= 6 || p->single_program_flow) {
1721 push_loop_stack(p, &p->store[p->nr_insn]);
1722 return &p->store[p->nr_insn];
1723 } else {
1724 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1725
1726 push_loop_stack(p, insn);
1727
1728 /* Override the defaults for this instruction:
1729 */
1730 brw_set_dest(p, insn, brw_null_reg());
1731 brw_set_src0(p, insn, brw_null_reg());
1732 brw_set_src1(p, insn, brw_null_reg());
1733
1734 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1735 brw_inst_set_exec_size(devinfo, insn, execute_size);
1736 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1737
1738 return insn;
1739 }
1740 }
1741
1742 /**
1743 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1744 * instruction here.
1745 *
1746 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1747 * nesting, since it can always just point to the end of the block/current loop.
1748 */
1749 static void
1750 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1751 {
1752 const struct gen_device_info *devinfo = p->devinfo;
1753 brw_inst *do_inst = get_inner_do_insn(p);
1754 brw_inst *inst;
1755 unsigned br = brw_jump_scale(devinfo);
1756
1757 assert(devinfo->gen < 6);
1758
1759 for (inst = while_inst - 1; inst != do_inst; inst--) {
1760 /* If the jump count is nonzero, this instruction has already been
1761 * patched because it's part of a loop nested inside the one we're
1762 * patching.
1763 */
1764 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1765 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1766 brw_inst_set_gen4_jump_count(devinfo, inst, br * ((while_inst - inst) + 1));
1767 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1768 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1769 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1770 }
1771 }
1772 }
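/* As a hypothetical example on Gen5 (br == 2): a BREAK three slots before
 * the WHILE gets jump count 2 * (3 + 1) = 8, landing one slot past the
 * WHILE and out of the loop, while a CONTINUE in the same spot gets
 * 2 * 3 = 6, landing on the WHILE itself so that it branches back to the
 * top of the loop.
 */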
1773
1774 brw_inst *
1775 brw_WHILE(struct brw_codegen *p)
1776 {
1777 const struct gen_device_info *devinfo = p->devinfo;
1778 brw_inst *insn, *do_insn;
1779 unsigned br = brw_jump_scale(devinfo);
1780
1781 if (devinfo->gen >= 6) {
1782 insn = next_insn(p, BRW_OPCODE_WHILE);
1783 do_insn = get_inner_do_insn(p);
1784
1785 if (devinfo->gen >= 8) {
1786 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1787 brw_set_src0(p, insn, brw_imm_d(0));
1788 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1789 } else if (devinfo->gen == 7) {
1790 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1791 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1792 brw_set_src1(p, insn, brw_imm_w(0));
1793 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1794 } else {
1795 brw_set_dest(p, insn, brw_imm_w(0));
1796 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1797 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1798 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1799 }
1800
1801 brw_inst_set_exec_size(devinfo, insn, brw_get_default_exec_size(p));
1802
1803 } else {
1804 if (p->single_program_flow) {
1805 insn = next_insn(p, BRW_OPCODE_ADD);
1806 do_insn = get_inner_do_insn(p);
1807
1808 brw_set_dest(p, insn, brw_ip_reg());
1809 brw_set_src0(p, insn, brw_ip_reg());
1810 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1811 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1812 } else {
1813 insn = next_insn(p, BRW_OPCODE_WHILE);
1814 do_insn = get_inner_do_insn(p);
1815
1816 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1817
1818 brw_set_dest(p, insn, brw_ip_reg());
1819 brw_set_src0(p, insn, brw_ip_reg());
1820 brw_set_src1(p, insn, brw_imm_d(0));
1821
1822 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1823 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1824 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1825
1826 brw_patch_break_cont(p, insn);
1827 }
1828 }
1829 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1830
1831 p->loop_stack_depth--;
1832
1833 return insn;
1834 }
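/* In the pre-gen6 single-program-flow path above, the WHILE degenerates to
 * a plain "ADD ip, ip, imm" with the immediate measured in bytes: e.g.
 * (hypothetically) a six-instruction loop body gives
 * (do_insn - insn) * 16 == -96, rewinding IP to the top of the loop.
 */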
1835
1836 /* FORWARD JUMPS:
1837 */
1838 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1839 {
1840 const struct gen_device_info *devinfo = p->devinfo;
1841 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1842 unsigned jmpi = 1;
1843
1844 if (devinfo->gen >= 5)
1845 jmpi = 2;
1846
1847 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1848 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1849
1850 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1851 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1852 }
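/* For example (hypothetical indices): a JMPI emitted at slot 5 and patched
 * once p->nr_insn reaches 9 skips the 9 - 5 - 1 = 3 instructions that
 * follow it; on Gen5+ each instruction counts twice (jmpi == 2), so the
 * encoded jump count is 6.
 */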
1853
1854 /* To integrate with the above, it makes sense that the comparison
1855 * instruction should populate the flag register. It might be simpler
1856 * just to use the flag reg for most WM tasks?
1857 */
1858 void brw_CMP(struct brw_codegen *p,
1859 struct brw_reg dest,
1860 unsigned conditional,
1861 struct brw_reg src0,
1862 struct brw_reg src1)
1863 {
1864 const struct gen_device_info *devinfo = p->devinfo;
1865 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1866
1867 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1868 brw_set_dest(p, insn, dest);
1869 brw_set_src0(p, insn, src0);
1870 brw_set_src1(p, insn, src1);
1871
1872 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1873 * page says:
1874 * "Any CMP instruction with a null destination must use a {switch}."
1875 *
1876 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1877 * mentioned on their workarounds pages.
1878 */
1879 if (devinfo->gen == 7) {
1880 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1881 dest.nr == BRW_ARF_NULL) {
1882 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1883 }
1884 }
1885 }
1886
1887 /***********************************************************************
1888 * Helpers for the various SEND message types:
1889 */
1890
1891 /** Extended math function, float[8].
1892 */
1893 void gen4_math(struct brw_codegen *p,
1894 struct brw_reg dest,
1895 unsigned function,
1896 unsigned msg_reg_nr,
1897 struct brw_reg src,
1898 unsigned precision )
1899 {
1900 const struct gen_device_info *devinfo = p->devinfo;
1901 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1902 unsigned data_type;
1903 if (has_scalar_region(src)) {
1904 data_type = BRW_MATH_DATA_SCALAR;
1905 } else {
1906 data_type = BRW_MATH_DATA_VECTOR;
1907 }
1908
1909 assert(devinfo->gen < 6);
1910
1911 /* Example code doesn't set predicate_control for send
1912 * instructions.
1913 */
1914 brw_inst_set_pred_control(devinfo, insn, 0);
1915 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1916
1917 brw_set_dest(p, insn, dest);
1918 brw_set_src0(p, insn, src);
1919 brw_set_math_message(p,
1920 insn,
1921 function,
1922 src.type == BRW_REGISTER_TYPE_D,
1923 precision,
1924 data_type);
1925 }
1926
1927 void gen6_math(struct brw_codegen *p,
1928 struct brw_reg dest,
1929 unsigned function,
1930 struct brw_reg src0,
1931 struct brw_reg src1)
1932 {
1933 const struct gen_device_info *devinfo = p->devinfo;
1934 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1935
1936 assert(devinfo->gen >= 6);
1937
1938 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1939 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1940
1941 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1942 if (devinfo->gen == 6) {
1943 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1944 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1945 }
1946
1947 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1948 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1949 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1950 assert(src0.type != BRW_REGISTER_TYPE_F);
1951 assert(src1.type != BRW_REGISTER_TYPE_F);
1952 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1953 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1954 } else {
1955 assert(src0.type == BRW_REGISTER_TYPE_F);
1956 assert(src1.type == BRW_REGISTER_TYPE_F);
1957 }
1958
1959 /* Source modifiers are ignored for extended math instructions on Gen6. */
1960 if (devinfo->gen == 6) {
1961 assert(!src0.negate);
1962 assert(!src0.abs);
1963 assert(!src1.negate);
1964 assert(!src1.abs);
1965 }
1966
1967 brw_inst_set_math_function(devinfo, insn, function);
1968
1969 brw_set_dest(p, insn, dest);
1970 brw_set_src0(p, insn, src0);
1971 brw_set_src1(p, insn, src1);
1972 }
1973
1974 /**
1975 * Return the right surface index to access the thread scratch space using
1976 * stateless dataport messages.
1977 */
1978 unsigned
1979 brw_scratch_surface_idx(const struct brw_codegen *p)
1980 {
1981 /* The scratch space is thread-local so IA coherency is unnecessary. */
1982 if (p->devinfo->gen >= 8)
1983 return GEN8_BTI_STATELESS_NON_COHERENT;
1984 else
1985 return BRW_BTI_STATELESS;
1986 }
1987
1988 /**
1989 * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1990 * using a constant offset per channel.
1991 *
1992 * The offset must be aligned to oword size (16 bytes). Used for
1993 * register spilling.
1994 */
1995 void brw_oword_block_write_scratch(struct brw_codegen *p,
1996 struct brw_reg mrf,
1997 int num_regs,
1998 unsigned offset)
1999 {
2000 const struct gen_device_info *devinfo = p->devinfo;
2001 const unsigned target_cache =
2002 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2003 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2004 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2005 uint32_t msg_type;
2006
2007 if (devinfo->gen >= 6)
2008 offset /= 16;
2009
2010 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2011
2012 const unsigned mlen = 1 + num_regs;
2013
2014 /* Set up the message header. This is g0, with g0.2 filled with
2015 * the offset. We don't want to leave our offset around in g0 or
2016 * it'll screw up texture samples, so set it up inside the message
2017 * reg.
2018 */
2019 {
2020 brw_push_insn_state(p);
2021 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2022 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2023 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2024
2025 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2026
2027 /* set message header global offset field (reg 0, element 2) */
2028 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2029 brw_MOV(p,
2030 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2031 mrf.nr,
2032 2), BRW_REGISTER_TYPE_UD),
2033 brw_imm_ud(offset));
2034
2035 brw_pop_insn_state(p);
2036 }
2037
2038 {
2039 struct brw_reg dest;
2040 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2041 int send_commit_msg;
2042 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
2043 BRW_REGISTER_TYPE_UW);
2044
2045 brw_inst_set_compression(devinfo, insn, false);
2046
2047 if (brw_inst_exec_size(devinfo, insn) >= 16)
2048 src_header = vec16(src_header);
2049
2050 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2051 if (devinfo->gen < 6)
2052 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2053
2054 /* Prior to gen6, writes followed by reads from the same location
2055 * are not guaranteed to be ordered unless write_commit is set.
2056 * If set, then a no-op write is issued to the destination
2057 * register to set a dependency, and a read from the destination
2058 * can be used to ensure the ordering.
2059 *
2060 * For gen6, only writes between different threads need ordering
2061 * protection. Our use of DP writes is all about register
2062 * spilling within a thread.
2063 */
2064 if (devinfo->gen >= 6) {
2065 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2066 send_commit_msg = 0;
2067 } else {
2068 dest = src_header;
2069 send_commit_msg = 1;
2070 }
2071
2072 brw_set_dest(p, insn, dest);
2073 if (devinfo->gen >= 6) {
2074 brw_set_src0(p, insn, mrf);
2075 } else {
2076 brw_set_src0(p, insn, brw_null_reg());
2077 }
2078
2079 if (devinfo->gen >= 6)
2080 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2081 else
2082 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2083
2084 brw_set_dp_write_message(p,
2085 insn,
2086 brw_scratch_surface_idx(p),
2087 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2088 msg_type,
2089 target_cache,
2090 mlen,
2091 true, /* header_present */
2092 0, /* not a render target */
2093 send_commit_msg, /* response_length */
2094 0, /* eot */
2095 send_commit_msg);
2096 }
2097 }
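/* A minimal usage sketch with hypothetical values -- spilling two GRFs of
 * data staged in the MRFs after m1 to scratch byte offset 64:
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(1), 2, 64);
 *
 * This emits the two header MOVs above plus a SEND with mlen == 1 + 2 == 3
 * (header plus data) storing num_regs * 8 == 16 dwords, i.e. 4 owords.
 */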
2098
2099
2100 /**
2101 * Read a block of owords (half a GRF each) from the scratch buffer
2102 * using a constant index per channel.
2103 *
2104 * Offset must be aligned to oword size (16 bytes). Used for register
2105 * spilling.
2106 */
2107 void
2108 brw_oword_block_read_scratch(struct brw_codegen *p,
2109 struct brw_reg dest,
2110 struct brw_reg mrf,
2111 int num_regs,
2112 unsigned offset)
2113 {
2114 const struct gen_device_info *devinfo = p->devinfo;
2115
2116 if (devinfo->gen >= 6)
2117 offset /= 16;
2118
2119 if (p->devinfo->gen >= 7) {
2120 /* On gen7 and above, we no longer have message registers and we can
2121 * send from any register we want. By using the destination register
2122 * for the message, we guarantee that the implied message write won't
2123 * accidentally overwrite anything. This has been a problem because
2124 * the MRF registers and source for the final FB write are both fixed
2125 * and may overlap.
2126 */
2127 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2128 } else {
2129 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2130 }
2131 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2132
2133 const unsigned rlen = num_regs;
2134 const unsigned target_cache =
2135 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2136 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2137 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2138
2139 {
2140 brw_push_insn_state(p);
2141 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2142 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2143 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2144
2145 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2146
2147 /* set message header global offset field (reg 0, element 2) */
2148 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2149 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2150
2151 brw_pop_insn_state(p);
2152 }
2153
2154 {
2155 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2156
2157 assert(brw_inst_pred_control(devinfo, insn) == 0);
2158 brw_inst_set_compression(devinfo, insn, false);
2159
2160 brw_set_dest(p, insn, dest); /* UW? */
2161 if (devinfo->gen >= 6) {
2162 brw_set_src0(p, insn, mrf);
2163 } else {
2164 brw_set_src0(p, insn, brw_null_reg());
2165 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2166 }
2167
2168 brw_set_dp_read_message(p,
2169 insn,
2170 brw_scratch_surface_idx(p),
2171 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2172 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
2173 target_cache,
2174 1, /* msg_length */
2175 true, /* header_present */
2176 rlen);
2177 }
2178 }
2179
2180 void
2181 gen7_block_read_scratch(struct brw_codegen *p,
2182 struct brw_reg dest,
2183 int num_regs,
2184 unsigned offset)
2185 {
2186 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2187 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2188
2189 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2190
2191 /* The HW requires that the header is present; this is to get the g0.5
2192 * scratch offset.
2193 */
2194 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2195
2196 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2197 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2198 * is 32 bytes, which happens to be the size of a register.
2199 */
2200 offset /= REG_SIZE;
2201 assert(offset < (1 << 12));
2202
2203 gen7_set_dp_scratch_message(p, insn,
2204 false, /* scratch read */
2205 false, /* OWords */
2206 false, /* invalidate after read */
2207 num_regs,
2208 offset,
2209 1, /* mlen: just g0 */
2210 num_regs, /* rlen */
2211 true); /* header present */
2212 }
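/* E.g. (hypothetical values): a 4-register read from scratch byte offset 96
 * encodes offset 96 / REG_SIZE == 3 HWords, and the 12-bit field can
 * therefore address at most 4096 * 32 bytes == 128 KB of per-thread
 * scratch.
 */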
2213
2214 /**
2215 * Read float[4] vectors from the data port constant cache.
2216 * Location (in buffer) should be a multiple of 16.
2217 * Used for fetching shader constants.
2218 */
2219 void brw_oword_block_read(struct brw_codegen *p,
2220 struct brw_reg dest,
2221 struct brw_reg mrf,
2222 uint32_t offset,
2223 uint32_t bind_table_index)
2224 {
2225 const struct gen_device_info *devinfo = p->devinfo;
2226 const unsigned target_cache =
2227 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2228 BRW_DATAPORT_READ_TARGET_DATA_CACHE);
2229 const unsigned exec_size = 1 << brw_get_default_exec_size(p);
2230
2231 /* On gen6+, the offset is in units of owords (16 bytes). */
2232 if (devinfo->gen >= 6)
2233 offset /= 16;
2234
2235 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2236
2237 brw_push_insn_state(p);
2238 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2239 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2240 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2241
2242 brw_push_insn_state(p);
2243 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2244 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2245
2246 /* set message header global offset field (reg 0, element 2) */
2247 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2248 brw_MOV(p,
2249 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2250 mrf.nr,
2251 2), BRW_REGISTER_TYPE_UD),
2252 brw_imm_ud(offset));
2253 brw_pop_insn_state(p);
2254
2255 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2256
2257 /* cast dest to a uword[8] vector */
2258 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2259
2260 brw_set_dest(p, insn, dest);
2261 if (devinfo->gen >= 6) {
2262 brw_set_src0(p, insn, mrf);
2263 } else {
2264 brw_set_src0(p, insn, brw_null_reg());
2265 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2266 }
2267
2268 brw_set_dp_read_message(p, insn, bind_table_index,
2269 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2270 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2271 target_cache,
2272 1, /* msg_length */
2273 true, /* header_present */
2274 DIV_ROUND_UP(exec_size, 8)); /* response_length */
2275
2276 brw_pop_insn_state(p);
2277 }
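/* A sketch of the size arithmetic above: with the default SIMD8 execution
 * size the message requests exec_size == 8 dwords (two owords) and a
 * response_length of one GRF; SIMD16 doubles both.
 */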
2278
2279 brw_inst *
2280 brw_fb_WRITE(struct brw_codegen *p,
2281 struct brw_reg payload,
2282 struct brw_reg implied_header,
2283 unsigned msg_control,
2284 unsigned binding_table_index,
2285 unsigned msg_length,
2286 unsigned response_length,
2287 bool eot,
2288 bool last_render_target,
2289 bool header_present)
2290 {
2291 const struct gen_device_info *devinfo = p->devinfo;
2292 const unsigned target_cache =
2293 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2294 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2295 brw_inst *insn;
2296 unsigned msg_type;
2297 struct brw_reg dest, src0;
2298
2299 if (brw_get_default_exec_size(p) >= BRW_EXECUTE_16)
2300 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2301 else
2302 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2303
2304 if (devinfo->gen >= 6) {
2305 insn = next_insn(p, BRW_OPCODE_SENDC);
2306 } else {
2307 insn = next_insn(p, BRW_OPCODE_SEND);
2308 }
2309 brw_inst_set_compression(devinfo, insn, false);
2310
2311 if (devinfo->gen >= 6) {
2312 /* headerless version, just submit color payload */
2313 src0 = payload;
2314
2315 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2316 } else {
2317 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2318 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2319 src0 = implied_header;
2320
2321 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2322 }
2323
2324 brw_set_dest(p, insn, dest);
2325 brw_set_src0(p, insn, src0);
2326 brw_set_dp_write_message(p,
2327 insn,
2328 binding_table_index,
2329 msg_control,
2330 msg_type,
2331 target_cache,
2332 msg_length,
2333 header_present,
2334 last_render_target,
2335 response_length,
2336 eot,
2337 0 /* send_commit_msg */);
2338
2339 return insn;
2340 }
2341
2342 brw_inst *
2343 gen9_fb_READ(struct brw_codegen *p,
2344 struct brw_reg dst,
2345 struct brw_reg payload,
2346 unsigned binding_table_index,
2347 unsigned msg_length,
2348 unsigned response_length,
2349 bool per_sample)
2350 {
2351 const struct gen_device_info *devinfo = p->devinfo;
2352 assert(devinfo->gen >= 9);
2353 const unsigned msg_subtype =
2354 brw_get_default_exec_size(p) == BRW_EXECUTE_16 ? 0 : 1;
2355 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2356
2357 brw_set_dest(p, insn, dst);
2358 brw_set_src0(p, insn, payload);
2359 brw_set_dp_read_message(p, insn, binding_table_index,
2360 per_sample << 5 | msg_subtype,
2361 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2362 GEN6_SFID_DATAPORT_RENDER_CACHE,
2363 msg_length, true /* header_present */,
2364 response_length);
2365 brw_inst_set_rt_slot_group(devinfo, insn, brw_get_default_group(p) / 16);
2366
2367 return insn;
2368 }
2369
2370 /**
2371 * Texture sample instruction.
2372 * Note: the msg_type plus msg_length values determine exactly what kind
2373 * of sampling operation is performed. See volume 4, page 161 of docs.
2374 */
2375 void brw_SAMPLE(struct brw_codegen *p,
2376 struct brw_reg dest,
2377 unsigned msg_reg_nr,
2378 struct brw_reg src0,
2379 unsigned binding_table_index,
2380 unsigned sampler,
2381 unsigned msg_type,
2382 unsigned response_length,
2383 unsigned msg_length,
2384 unsigned header_present,
2385 unsigned simd_mode,
2386 unsigned return_format)
2387 {
2388 const struct gen_device_info *devinfo = p->devinfo;
2389 brw_inst *insn;
2390
2391 if (msg_reg_nr != -1)
2392 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2393
2394 insn = next_insn(p, BRW_OPCODE_SEND);
2395 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2396
2397 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2398 *
2399 * "Instruction compression is not allowed for this instruction (that
2400 * is, send). The hardware behavior is undefined if this instruction is
2401 * set as compressed. However, compress control can be set to "SecHalf"
2402 * to affect the EMask generation."
2403 *
2404 * No similar wording is found in later PRMs, but there are examples
2405 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2406 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2407 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2408 */
2409 brw_inst_set_compression(devinfo, insn, false);
2410
2411 if (devinfo->gen < 6)
2412 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2413
2414 brw_set_dest(p, insn, dest);
2415 brw_set_src0(p, insn, src0);
2416 brw_set_sampler_message(p, insn,
2417 binding_table_index,
2418 sampler,
2419 msg_type,
2420 response_length,
2421 msg_length,
2422 header_present,
2423 simd_mode,
2424 return_format);
2425 }
2426
2427 /* Adjust the message header's sampler state pointer to
2428 * select the correct group of 16 samplers.
2429 */
2430 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2431 struct brw_reg header,
2432 struct brw_reg sampler_index)
2433 {
2434 /* The "Sampler Index" field can only store values between 0 and 15.
2435 * However, we can add an offset to the "Sampler State Pointer"
2436 * field, effectively selecting a different set of 16 samplers.
2437 *
2438 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2439 * offset, and each sampler state is only 16 bytes, so we can't
2440 * exclusively use the offset; we have to use both.
2441 */
2442
2443 const struct gen_device_info *devinfo = p->devinfo;
2444
2445 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2446 const int sampler_state_size = 16; /* 16 bytes */
2447 uint32_t sampler = sampler_index.ud;
2448
2449 if (sampler >= 16) {
2450 assert(devinfo->is_haswell || devinfo->gen >= 8);
2451 brw_ADD(p,
2452 get_element_ud(header, 3),
2453 get_element_ud(brw_vec8_grf(0, 0), 3),
2454 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2455 }
2456 } else {
2457 /* Non-const sampler array indexing case */
2458 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2459 return;
2460 }
2461
2462 struct brw_reg temp = get_element_ud(header, 3);
2463
2464 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2465 brw_SHL(p, temp, temp, brw_imm_ud(4));
2466 brw_ADD(p,
2467 get_element_ud(header, 3),
2468 get_element_ud(brw_vec8_grf(0, 0), 3),
2469 temp);
2470 }
2471 }
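/* Worked example (hypothetical index): for an immediate sampler_index of
 * 20, the code above adds 16 * (20 / 16) * 16 == 256 bytes to the pointer
 * in g0.3, selecting the second group of 16 samplers; the remaining
 * 20 % 16 == 4 is what ends up in the descriptor's 4-bit "Sampler Index"
 * field.  The indirect path computes the same offset bitwise:
 * (0x14 & 0xf0) << 4 == 0x100 == 256.
 */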
2472
2473 /* All these variables are pretty confusing - we might be better off
2474 * using bitmasks and macros for this, in the old style. Or perhaps
2475 * just having the caller instantiate the fields in dword3 itself.
2476 */
2477 void brw_urb_WRITE(struct brw_codegen *p,
2478 struct brw_reg dest,
2479 unsigned msg_reg_nr,
2480 struct brw_reg src0,
2481 enum brw_urb_write_flags flags,
2482 unsigned msg_length,
2483 unsigned response_length,
2484 unsigned offset,
2485 unsigned swizzle)
2486 {
2487 const struct gen_device_info *devinfo = p->devinfo;
2488 brw_inst *insn;
2489
2490 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2491
2492 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2493 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2494 brw_push_insn_state(p);
2495 brw_set_default_access_mode(p, BRW_ALIGN_1);
2496 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2497 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2498 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2499 BRW_REGISTER_TYPE_UD),
2500 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2501 brw_imm_ud(0xff00));
2502 brw_pop_insn_state(p);
2503 }
2504
2505 insn = next_insn(p, BRW_OPCODE_SEND);
2506
2507 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2508
2509 brw_set_dest(p, insn, dest);
2510 brw_set_src0(p, insn, src0);
2511 brw_set_src1(p, insn, brw_imm_d(0));
2512
2513 if (devinfo->gen < 6)
2514 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2515
2516 brw_set_urb_message(p,
2517 insn,
2518 flags,
2519 msg_length,
2520 response_length,
2521 offset,
2522 swizzle);
2523 }
2524
2525 struct brw_inst *
2526 brw_send_indirect_message(struct brw_codegen *p,
2527 unsigned sfid,
2528 struct brw_reg dst,
2529 struct brw_reg payload,
2530 struct brw_reg desc)
2531 {
2532 const struct gen_device_info *devinfo = p->devinfo;
2533 struct brw_inst *send;
2534 int setup;
2535
2536 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2537
2538 assert(desc.type == BRW_REGISTER_TYPE_UD);
2539
2540 /* We hold on to the setup instruction (the SEND in the direct case, the OR
2541 * in the indirect case) by its index in the instruction store. The
2542 * pointer returned by next_insn() may become invalid if emitting the SEND
2543 * in the indirect case reallocs the store.
2544 */
2545
2546 if (desc.file == BRW_IMMEDIATE_VALUE) {
2547 setup = p->nr_insn;
2548 send = next_insn(p, BRW_OPCODE_SEND);
2549 brw_set_src1(p, send, desc);
2550
2551 } else {
2552 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2553
2554 brw_push_insn_state(p);
2555 brw_set_default_access_mode(p, BRW_ALIGN_1);
2556 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2557 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2558 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2559
2560 /* Load the indirect descriptor to an address register using OR so the
2561 * caller can specify additional descriptor bits with the usual
2562 * brw_set_*_message() helper functions.
2563 */
2564 setup = p->nr_insn;
2565 brw_OR(p, addr, desc, brw_imm_ud(0));
2566
2567 brw_pop_insn_state(p);
2568
2569 send = next_insn(p, BRW_OPCODE_SEND);
2570 brw_set_src1(p, send, addr);
2571 }
2572
2573 if (dst.width < BRW_EXECUTE_8)
2574 brw_inst_set_exec_size(devinfo, send, dst.width);
2575
2576 brw_set_dest(p, send, dst);
2577 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2578 brw_inst_set_sfid(devinfo, send, sfid);
2579
2580 return &p->store[setup];
2581 }
2582
2583 static struct brw_inst *
2584 brw_send_indirect_surface_message(struct brw_codegen *p,
2585 unsigned sfid,
2586 struct brw_reg dst,
2587 struct brw_reg payload,
2588 struct brw_reg surface,
2589 unsigned message_len,
2590 unsigned response_len,
2591 bool header_present)
2592 {
2593 const struct gen_device_info *devinfo = p->devinfo;
2594 struct brw_inst *insn;
2595
2596 if (surface.file != BRW_IMMEDIATE_VALUE) {
2597 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2598
2599 brw_push_insn_state(p);
2600 brw_set_default_access_mode(p, BRW_ALIGN_1);
2601 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2602 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2603 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2604
2605 /* Mask out invalid bits from the surface index to avoid hangs, e.g. when
2606 * some surface array is accessed out of bounds.
2607 */
2608 insn = brw_AND(p, addr,
2609 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2610 BRW_GET_SWZ(surface.swizzle, 0)),
2611 brw_imm_ud(0xff));
2612
2613 brw_pop_insn_state(p);
2614
2615 surface = addr;
2616 }
2617
2618 insn = brw_send_indirect_message(p, sfid, dst, payload, surface);
2619 brw_inst_set_mlen(devinfo, insn, message_len);
2620 brw_inst_set_rlen(devinfo, insn, response_len);
2621 brw_inst_set_header_present(devinfo, insn, header_present);
2622
2623 return insn;
2624 }
2625
2626 static bool
2627 while_jumps_before_offset(const struct gen_device_info *devinfo,
2628 brw_inst *insn, int while_offset, int start_offset)
2629 {
2630 int scale = 16 / brw_jump_scale(devinfo);
2631 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2632 : brw_inst_jip(devinfo, insn);
2633 assert(jip < 0);
2634 return while_offset + jip * scale <= start_offset;
2635 }
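/* E.g. (hypothetical offsets) on Gen7, scale == 16 / 2 == 8: a WHILE at
 * byte offset 160 with a JIP of -10 targets 160 - 80 == 80, so it "jumps
 * before" any start_offset of 80 or more.
 */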
2636
2637
2638 static int
2639 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2640 {
2641 int offset;
2642 void *store = p->store;
2643 const struct gen_device_info *devinfo = p->devinfo;
2644
2645 int depth = 0;
2646
2647 for (offset = next_offset(devinfo, store, start_offset);
2648 offset < p->next_insn_offset;
2649 offset = next_offset(devinfo, store, offset)) {
2650 brw_inst *insn = store + offset;
2651
2652 switch (brw_inst_opcode(devinfo, insn)) {
2653 case BRW_OPCODE_IF:
2654 depth++;
2655 break;
2656 case BRW_OPCODE_ENDIF:
2657 if (depth == 0)
2658 return offset;
2659 depth--;
2660 break;
2661 case BRW_OPCODE_WHILE:
2662 /* If the while doesn't jump before our instruction, it's the end
2663 * of a sibling do...while loop. Ignore it.
2664 */
2665 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2666 continue;
2667 /* fallthrough */
2668 case BRW_OPCODE_ELSE:
2669 case BRW_OPCODE_HALT:
2670 if (depth == 0)
2671 return offset;
2672 }
2673 }
2674
2675 return 0;
2676 }
2677
2678 /* There is no DO instruction on gen6, so to find the end of the loop
2679 * we have to see if the loop is jumping back before our start
2680 * instruction.
2681 */
2682 static int
2683 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2684 {
2685 const struct gen_device_info *devinfo = p->devinfo;
2686 int offset;
2687 void *store = p->store;
2688
2689 assert(devinfo->gen >= 6);
2690
2691 /* Always start after the instruction (such as a WHILE) we're trying to fix
2692 * up.
2693 */
2694 for (offset = next_offset(devinfo, store, start_offset);
2695 offset < p->next_insn_offset;
2696 offset = next_offset(devinfo, store, offset)) {
2697 brw_inst *insn = store + offset;
2698
2699 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2700 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2701 return offset;
2702 }
2703 }
2704 assert(!"not reached");
2705 return start_offset;
2706 }
2707
2708 /* After program generation, go back and update the UIP and JIP of
2709 * BREAK, CONT, and HALT instructions to their correct locations.
2710 */
2711 void
2712 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2713 {
2714 const struct gen_device_info *devinfo = p->devinfo;
2715 int offset;
2716 int br = brw_jump_scale(devinfo);
2717 int scale = 16 / br;
2718 void *store = p->store;
2719
2720 if (devinfo->gen < 6)
2721 return;
2722
2723 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2724 brw_inst *insn = store + offset;
2725 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2726
2727 int block_end_offset = brw_find_next_block_end(p, offset);
2728 switch (brw_inst_opcode(devinfo, insn)) {
2729 case BRW_OPCODE_BREAK:
2730 assert(block_end_offset != 0);
2731 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2732 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2733 brw_inst_set_uip(devinfo, insn,
2734 (brw_find_loop_end(p, offset) - offset +
2735 (devinfo->gen == 6 ? 16 : 0)) / scale);
2736 break;
2737 case BRW_OPCODE_CONTINUE:
2738 assert(block_end_offset != 0);
2739 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2740 brw_inst_set_uip(devinfo, insn,
2741 (brw_find_loop_end(p, offset) - offset) / scale);
2742
2743 assert(brw_inst_uip(devinfo, insn) != 0);
2744 assert(brw_inst_jip(devinfo, insn) != 0);
2745 break;
2746
2747 case BRW_OPCODE_ENDIF: {
2748 int32_t jump = (block_end_offset == 0) ?
2749 1 * br : (block_end_offset - offset) / scale;
2750 if (devinfo->gen >= 7)
2751 brw_inst_set_jip(devinfo, insn, jump);
2752 else
2753 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2754 break;
2755 }
2756
2757 case BRW_OPCODE_HALT:
2758 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2759 *
2760 * "In case of the halt instruction not inside any conditional
2761 * code block, the value of <JIP> and <UIP> should be the
2762 * same. In case of the halt instruction inside conditional code
2763 * block, the <UIP> should be the end of the program, and the
2764 * <JIP> should be end of the most inner conditional code block."
2765 *
2766 * The uip will have already been set by whoever set up the
2767 * instruction.
2768 */
2769 if (block_end_offset == 0) {
2770 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2771 } else {
2772 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2773 }
2774 assert(brw_inst_uip(devinfo, insn) != 0);
2775 assert(brw_inst_jip(devinfo, insn) != 0);
2776 break;
2777 }
2778 }
2779 }
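/* Worked example with hypothetical offsets on Gen6 (br == 2, scale == 8):
 * a BREAK at byte offset 48 whose enclosing loop ends with a WHILE at
 * offset 80 gets JIP = (80 - 48) / 8 == 4 and, since Gen6 UIP points just
 * past the WHILE, UIP = (80 - 48 + 16) / 8 == 6; on Gen7 the UIP would be
 * 4, pointing at the WHILE itself.
 */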
2780
2781 void brw_ff_sync(struct brw_codegen *p,
2782 struct brw_reg dest,
2783 unsigned msg_reg_nr,
2784 struct brw_reg src0,
2785 bool allocate,
2786 unsigned response_length,
2787 bool eot)
2788 {
2789 const struct gen_device_info *devinfo = p->devinfo;
2790 brw_inst *insn;
2791
2792 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2793
2794 insn = next_insn(p, BRW_OPCODE_SEND);
2795 brw_set_dest(p, insn, dest);
2796 brw_set_src0(p, insn, src0);
2797 brw_set_src1(p, insn, brw_imm_d(0));
2798
2799 if (devinfo->gen < 6)
2800 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2801
2802 brw_set_ff_sync_message(p,
2803 insn,
2804 allocate,
2805 response_length,
2806 eot);
2807 }
2808
2809 /**
2810 * Emit the SEND instruction necessary to generate stream output data on Gen6
2811 * (for transform feedback).
2812 *
2813 * If send_commit_msg is true, this is the last piece of stream output data
2814 * from this thread, so send the data as a committed write. According to the
2815 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2816 *
2817 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2818 * writes are complete by sending the final write as a committed write."
2819 */
2820 void
2821 brw_svb_write(struct brw_codegen *p,
2822 struct brw_reg dest,
2823 unsigned msg_reg_nr,
2824 struct brw_reg src0,
2825 unsigned binding_table_index,
2826 bool send_commit_msg)
2827 {
2828 const struct gen_device_info *devinfo = p->devinfo;
2829 const unsigned target_cache =
2830 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2831 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2832 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2833 brw_inst *insn;
2834
2835 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2836
2837 insn = next_insn(p, BRW_OPCODE_SEND);
2838 brw_set_dest(p, insn, dest);
2839 brw_set_src0(p, insn, src0);
2840 brw_set_src1(p, insn, brw_imm_d(0));
2841 brw_set_dp_write_message(p, insn,
2842 binding_table_index,
2843 0, /* msg_control: ignored */
2844 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2845 target_cache,
2846 1, /* msg_length */
2847 true, /* header_present */
2848 0, /* last_render_target: ignored */
2849 send_commit_msg, /* response_length */
2850 0, /* end_of_thread */
2851 send_commit_msg); /* send_commit_msg */
2852 }
2853
2854 static unsigned
2855 brw_surface_payload_size(struct brw_codegen *p,
2856 unsigned num_channels,
2857 bool has_simd4x2,
2858 bool has_simd16)
2859 {
2860 if (has_simd4x2 && brw_get_default_access_mode(p) == BRW_ALIGN_16)
2861 return 1;
2862 else if (has_simd16 && brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2863 return 2 * num_channels;
2864 else
2865 return num_channels;
2866 }
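/* E.g. an Align16 message with SIMD4x2 support always fits its payload in
 * a single register; a SIMD16 access to four channels needs 2 * 4 == 8
 * registers; the SIMD8 fallback needs one register per channel.
 */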
2867
2868 static void
2869 brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
2870 brw_inst *insn,
2871 unsigned atomic_op,
2872 bool response_expected)
2873 {
2874 const struct gen_device_info *devinfo = p->devinfo;
2875 unsigned msg_control =
2876 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2877 (response_expected ? 1 << 5 : 0); /* Return data expected */
2878
2879 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2880 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2881 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2882 msg_control |= 1 << 4; /* SIMD8 mode */
2883
2884 brw_inst_set_dp_msg_type(devinfo, insn,
2885 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2886 } else {
2887 brw_inst_set_dp_msg_type(devinfo, insn,
2888 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2889 }
2890 } else {
2891 brw_inst_set_dp_msg_type(devinfo, insn,
2892 GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2893
2894 if (brw_get_default_exec_size(p) != BRW_EXECUTE_16)
2895 msg_control |= 1 << 4; /* SIMD8 mode */
2896 }
2897
2898 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2899 }
2900
2901 void
2902 brw_untyped_atomic(struct brw_codegen *p,
2903 struct brw_reg dst,
2904 struct brw_reg payload,
2905 struct brw_reg surface,
2906 unsigned atomic_op,
2907 unsigned msg_length,
2908 bool response_expected,
2909 bool header_present)
2910 {
2911 const struct gen_device_info *devinfo = p->devinfo;
2912 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2913 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2914 GEN7_SFID_DATAPORT_DATA_CACHE);
2915 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
2916 /* Mask out unused components -- This is especially important in Align16
2917 * mode on generations that don't have native support for SIMD4x2 atomics,
2918 * because unused but enabled components will cause the dataport to perform
2919 * additional atomic operations on the addresses that happen to be in the
2920 * uninitialized Y, Z and W coordinates of the payload.
2921 */
2922 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2923 struct brw_inst *insn = brw_send_indirect_surface_message(
2924 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
2925 brw_surface_payload_size(p, response_expected,
2926 devinfo->gen >= 8 || devinfo->is_haswell, true),
2927 header_present);
2928
2929 brw_set_dp_untyped_atomic_message(
2930 p, insn, atomic_op, response_expected);
2931 }
2932
2933 static void
2934 brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
2935 struct brw_inst *insn,
2936 unsigned num_channels)
2937 {
2938 const struct gen_device_info *devinfo = p->devinfo;
2939 /* Set mask of 32-bit channels to drop. */
2940 unsigned msg_control = 0xf & (0xf << num_channels);
2941
2942 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2943 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2944 msg_control |= 1 << 4; /* SIMD16 mode */
2945 else
2946 msg_control |= 2 << 4; /* SIMD8 mode */
2947 }
2948
2949 brw_inst_set_dp_msg_type(devinfo, insn,
2950 (devinfo->gen >= 8 || devinfo->is_haswell ?
2951 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2952 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
2953 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2954 }
2955
2956 void
2957 brw_untyped_surface_read(struct brw_codegen *p,
2958 struct brw_reg dst,
2959 struct brw_reg payload,
2960 struct brw_reg surface,
2961 unsigned msg_length,
2962 unsigned num_channels)
2963 {
2964 const struct gen_device_info *devinfo = p->devinfo;
2965 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2966 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2967 GEN7_SFID_DATAPORT_DATA_CACHE);
2968 struct brw_inst *insn = brw_send_indirect_surface_message(
2969 p, sfid, dst, payload, surface, msg_length,
2970 brw_surface_payload_size(p, num_channels, true, true),
2971 false);
2972
2973 brw_set_dp_untyped_surface_read_message(
2974 p, insn, num_channels);
2975 }
2976
2977 static void
2978 brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
2979 struct brw_inst *insn,
2980 unsigned num_channels)
2981 {
2982 const struct gen_device_info *devinfo = p->devinfo;
2983 /* Set mask of 32-bit channels to drop. */
2984 unsigned msg_control = 0xf & (0xf << num_channels);
2985
2986 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
2987 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
2988 msg_control |= 1 << 4; /* SIMD16 mode */
2989 else
2990 msg_control |= 2 << 4; /* SIMD8 mode */
2991 } else {
2992 if (devinfo->gen >= 8 || devinfo->is_haswell)
2993 msg_control |= 0 << 4; /* SIMD4x2 mode */
2994 else
2995 msg_control |= 2 << 4; /* SIMD8 mode */
2996 }
2997
2998 brw_inst_set_dp_msg_type(devinfo, insn,
2999 devinfo->gen >= 8 || devinfo->is_haswell ?
3000 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
3001 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
3002 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3003 }
3004
3005 void
3006 brw_untyped_surface_write(struct brw_codegen *p,
3007 struct brw_reg payload,
3008 struct brw_reg surface,
3009 unsigned msg_length,
3010 unsigned num_channels,
3011 bool header_present)
3012 {
3013 const struct gen_device_info *devinfo = p->devinfo;
3014 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3015 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3016 GEN7_SFID_DATAPORT_DATA_CACHE);
3017 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3018 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3019 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3020 WRITEMASK_X : WRITEMASK_XYZW;
3021 struct brw_inst *insn = brw_send_indirect_surface_message(
3022 p, sfid, brw_writemask(brw_null_reg(), mask),
3023 payload, surface, msg_length, 0, header_present);
3024
3025 brw_set_dp_untyped_surface_write_message(
3026 p, insn, num_channels);
3027 }
3028
3029 static unsigned
3030 brw_byte_scattered_data_element_from_bit_size(unsigned bit_size)
3031 {
3032 switch (bit_size) {
3033 case 8:
3034 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_BYTE;
3035 case 16:
3036 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD;
3037 case 32:
3038 return GEN7_BYTE_SCATTERED_DATA_ELEMENT_DWORD;
3039 default:
3040 unreachable("Unsupported bit_size for byte scattered messages");
3041 }
3042 }
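/* Following the callers below, e.g. a 16-bit SIMD16 access ends up with
 * msg_control == (GEN7_BYTE_SCATTERED_DATA_ELEMENT_WORD << 2) | 1.
 */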
3043
3044
3045 void
3046 brw_byte_scattered_read(struct brw_codegen *p,
3047 struct brw_reg dst,
3048 struct brw_reg payload,
3049 struct brw_reg surface,
3050 unsigned msg_length,
3051 unsigned bit_size)
3052 {
3053 const struct gen_device_info *devinfo = p->devinfo;
3054 assert(devinfo->gen > 7 || devinfo->is_haswell);
3055 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3056 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
3057
3058 struct brw_inst *insn = brw_send_indirect_surface_message(
3059 p, sfid, dst, payload, surface, msg_length,
3060 brw_surface_payload_size(p, 1, true, true),
3061 false);
3062
3063 unsigned msg_control =
3064 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3065
3066 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
3067 msg_control |= 1; /* SIMD16 mode */
3068 else
3069 msg_control |= 0; /* SIMD8 mode */
3070
3071 brw_inst_set_dp_msg_type(devinfo, insn,
3072 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ);
3073 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3074 }
3075
3076 void
3077 brw_byte_scattered_write(struct brw_codegen *p,
3078 struct brw_reg payload,
3079 struct brw_reg surface,
3080 unsigned msg_length,
3081 unsigned bit_size,
3082 bool header_present)
3083 {
3084 const struct gen_device_info *devinfo = p->devinfo;
3085 assert(devinfo->gen > 7 || devinfo->is_haswell);
3086 assert(brw_get_default_access_mode(p) == BRW_ALIGN_1);
3087 const unsigned sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
3088
3089 struct brw_inst *insn = brw_send_indirect_surface_message(
3090 p, sfid, brw_writemask(brw_null_reg(), WRITEMASK_XYZW),
3091 payload, surface, msg_length, 0, header_present);
3092
3093 unsigned msg_control =
3094 brw_byte_scattered_data_element_from_bit_size(bit_size) << 2;
3095
3096 if (brw_get_default_exec_size(p) == BRW_EXECUTE_16)
3097 msg_control |= 1;
3098 else
3099 msg_control |= 0;
3100
3101 brw_inst_set_dp_msg_type(devinfo, insn,
3102 HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE);
3103 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3104 }
3105
3106 static void
3107 brw_set_dp_typed_atomic_message(struct brw_codegen *p,
3108 struct brw_inst *insn,
3109 unsigned atomic_op,
3110 bool response_expected)
3111 {
3112 const struct gen_device_info *devinfo = p->devinfo;
3113 unsigned msg_control =
3114 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3115 (response_expected ? 1 << 5 : 0); /* Return data expected */
3116
3117 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3118 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3119 if ((brw_get_default_group(p) / 8) % 2 == 1)
3120 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3121
3122 brw_inst_set_dp_msg_type(devinfo, insn,
3123 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
3124 } else {
3125 brw_inst_set_dp_msg_type(devinfo, insn,
3126 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
3127 }
3128
3129 } else {
3130 brw_inst_set_dp_msg_type(devinfo, insn,
3131 GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);
3132
3133 if ((brw_get_default_group(p) / 8) % 2 == 1)
3134 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3135 }
3136
3137 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3138 }
3139
3140 void
3141 brw_typed_atomic(struct brw_codegen *p,
3142 struct brw_reg dst,
3143 struct brw_reg payload,
3144 struct brw_reg surface,
3145 unsigned atomic_op,
3146 unsigned msg_length,
3147 bool response_expected,
3148 bool header_present) {
3149 const struct gen_device_info *devinfo = p->devinfo;
3150 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3151 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3152 GEN6_SFID_DATAPORT_RENDER_CACHE);
3153 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3154 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3155 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3156 struct brw_inst *insn = brw_send_indirect_surface_message(
3157 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
3158 brw_surface_payload_size(p, response_expected,
3159 devinfo->gen >= 8 || devinfo->is_haswell, false),
3160 header_present);
3161
3162 brw_set_dp_typed_atomic_message(
3163 p, insn, atomic_op, response_expected);
3164 }
3165
3166 static void
3167 brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
3168 struct brw_inst *insn,
3169 unsigned num_channels)
3170 {
3171 const struct gen_device_info *devinfo = p->devinfo;
3172 /* Set mask of unused channels. */
3173 unsigned msg_control = 0xf & (0xf << num_channels);
3174
3175 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3176 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3177 if ((brw_get_default_group(p) / 8) % 2 == 1)
3178 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3179 else
3180 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3181 }
3182
3183 brw_inst_set_dp_msg_type(devinfo, insn,
3184 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
3185 } else {
3186 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3187 if ((brw_get_default_group(p) / 8) % 2 == 1)
3188 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3189 }
3190
3191 brw_inst_set_dp_msg_type(devinfo, insn,
3192 GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
3193 }
3194
3195 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3196 }
3197
3198 void
3199 brw_typed_surface_read(struct brw_codegen *p,
3200 struct brw_reg dst,
3201 struct brw_reg payload,
3202 struct brw_reg surface,
3203 unsigned msg_length,
3204 unsigned num_channels,
3205 bool header_present)
3206 {
3207 const struct gen_device_info *devinfo = p->devinfo;
3208 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3209 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3210 GEN6_SFID_DATAPORT_RENDER_CACHE);
3211 struct brw_inst *insn = brw_send_indirect_surface_message(
3212 p, sfid, dst, payload, surface, msg_length,
3213 brw_surface_payload_size(p, num_channels,
3214 devinfo->gen >= 8 || devinfo->is_haswell, false),
3215 header_present);
3216
3217 brw_set_dp_typed_surface_read_message(
3218 p, insn, num_channels);
3219 }
3220
3221 static void
3222 brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
3223 struct brw_inst *insn,
3224 unsigned num_channels)
3225 {
3226 const struct gen_device_info *devinfo = p->devinfo;
3227 /* Set mask of unused channels. */
3228 unsigned msg_control = 0xf & (0xf << num_channels);
3229
3230 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3231 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3232 if ((brw_get_default_group(p) / 8) % 2 == 1)
3233 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3234 else
3235 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3236 }
3237
3238 brw_inst_set_dp_msg_type(devinfo, insn,
3239 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);
3240
3241 } else {
3242 if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
3243 if ((brw_get_default_group(p) / 8) % 2 == 1)
3244 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3245 }
3246
3247 brw_inst_set_dp_msg_type(devinfo, insn,
3248 GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
3249 }
3250
3251 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3252 }
3253
3254 void
3255 brw_typed_surface_write(struct brw_codegen *p,
3256 struct brw_reg payload,
3257 struct brw_reg surface,
3258 unsigned msg_length,
3259 unsigned num_channels,
3260 bool header_present)
3261 {
3262 const struct gen_device_info *devinfo = p->devinfo;
3263 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3264 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3265 GEN6_SFID_DATAPORT_RENDER_CACHE);
3266 const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
3267 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3268 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3269 WRITEMASK_X : WRITEMASK_XYZW);
3270 struct brw_inst *insn = brw_send_indirect_surface_message(
3271 p, sfid, brw_writemask(brw_null_reg(), mask),
3272 payload, surface, msg_length, 0, header_present);
3273
3274 brw_set_dp_typed_surface_write_message(
3275 p, insn, num_channels);
3276 }

static void
brw_set_memory_fence_message(struct brw_codegen *p,
                             struct brw_inst *insn,
                             enum brw_message_target sfid,
                             bool commit_enable)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_desc(p, insn, brw_message_desc(
                   devinfo, 1, (commit_enable ? 1 : 0), true));

   brw_inst_set_sfid(devinfo, insn, sfid);

   switch (sfid) {
   case GEN6_SFID_DATAPORT_RENDER_CACHE:
      brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
      break;
   case GEN7_SFID_DATAPORT_DATA_CACHE:
      brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
      break;
   default:
      unreachable("Not reached");
   }

   if (commit_enable)
      brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
}

void
brw_memory_fence(struct brw_codegen *p,
                 struct brw_reg dst,
                 enum opcode send_op)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool commit_enable =
      devinfo->gen >= 10 || /* HSD ES # 1404612949 */
      (devinfo->gen == 7 && !devinfo->is_haswell);
   struct brw_inst *insn;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, BRW_EXECUTE_1);
   dst = vec1(dst);

   /* Set dst as the destination for dependency tracking even though the
    * MEMORY_FENCE message doesn't actually write anything back.
    */
   insn = next_insn(p, send_op);
   dst = retype(dst, BRW_REGISTER_TYPE_UW);
   brw_set_dest(p, insn, dst);
   brw_set_src0(p, insn, dst);
   brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
                                commit_enable);

   if (devinfo->gen == 7 && !devinfo->is_haswell) {
      /* IVB does typed surface access through the render cache, so we need
       * to flush it too.  Use a different register so both flushes can be
       * pipelined by the hardware.
       */
      insn = next_insn(p, send_op);
      brw_set_dest(p, insn, offset(dst, 1));
      brw_set_src0(p, insn, offset(dst, 1));
      brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
                                   commit_enable);

      /* Now write the response of the second message into the response of
       * the first to trigger a pipeline stall; this way future render and
       * data cache messages will be properly ordered with respect to past
       * data and render cache messages.
       */
      brw_MOV(p, dst, offset(dst, 1));
   }

   brw_pop_insn_state(p);
}
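
/* Illustrative sketch (hypothetical register choice): emitting a fence whose
 * commit write lands in g127 with an ordinary SEND might look like
 *
 *    brw_memory_fence(p, brw_vec1_grf(127, 0), BRW_OPCODE_SEND);
 *
 * On IVB this expands to the two back-to-back fences above plus the MOV
 * that stalls until both commit writes have landed.
 */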

void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;
   const uint16_t exec_size = brw_get_default_exec_size(p);
   const uint16_t qtr_ctrl = brw_get_default_group(p) / 8;

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually immediate.
    */
   insn = brw_send_indirect_message(p,
                                    GEN7_SFID_PIXEL_INTERPOLATOR,
                                    dest,
                                    mrf,
                                    vec1(data));
   brw_inst_set_mlen(devinfo, insn, msg_length);
   brw_inst_set_rlen(devinfo, insn, response_length);

   brw_inst_set_pi_simd_mode(devinfo, insn, exec_size == BRW_EXECUTE_16);
   brw_inst_set_pi_slot_group(devinfo, insn, qtr_ctrl / 2);
   brw_inst_set_pi_nopersp(devinfo, insn, noperspective);
   brw_inst_set_pi_message_type(devinfo, insn, mode);
}
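
/* Illustrative sketch (hypothetical operands): querying the pixel
 * interpolator at a given sample index for a SIMD16 dispatch might look like
 *
 *    brw_pixel_interpolator_query(p, dest, mrf, false,
 *                                 GEN7_PIXEL_INTERPOLATOR_LOC_SAMPLE,
 *                                 sample_id, msg_length, response_length);
 *
 * where GEN7_PIXEL_INTERPOLATOR_LOC_SAMPLE is one of the PI message types
 * and sample_id may be an immediate or a register; with an immediate the
 * send is emitted directly rather than through an indirect descriptor.
 */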

void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_get_default_exec_size(p);
   const unsigned qtr_control = brw_get_default_group(p) / 8;
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   if (brw_get_default_access_mode(p) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just find
          * the first bit set in the execution mask.  The register exists on
          * HSW already but it reads back as all ones when the current
          * instruction has execution masking disabled, so it's kind of
          * useless.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's not
             * tightly packed (i.e. it doesn't have the form '2^n - 1' for
             * some n).  Combine ce0 with the given dispatch (or vector) mask
             * to mask off those channels which were never dispatched by the
             * hardware.
             */
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }

         /* Quarter control has the effect of magically shifting the value of
          * ce0 so you'll get the first active channel relative to the
          * specified quarter control as result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
      } else {
         const struct brw_reg flag = brw_flag_reg(p->current->flag_subreg / 2,
                                                  p->current->flag_subreg % 2);

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking and
          * a conditional modifier enabled in order to get the full execution
          * mask in f1.0.  We could use a single 32-wide move here if it
          * weren't for the hardware bug that causes channel enables to be
          * applied incorrectly to the second half of 32-wide instructions on
          * Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }

         /* Find the first bit set in the exec_size-wide portion of the flag
          * register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.  Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination without and with execution masking to
          * find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}
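
/* Illustrative sketch (hypothetical registers): with an all-ones mask
 * immediate,
 *
 *    brw_find_live_channel(p, retype(brw_vec1_grf(10, 0),
 *                                    BRW_REGISTER_TYPE_UD),
 *                          brw_imm_ud(0xffffffff));
 *
 * takes the fast Gen8+ align1 path above and reduces to a single FBL on
 * ce0, which is only safe when the dispatch mask is known to be tightly
 * packed.
 */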

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_get_default_access_mode(p) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);
   assert(!src.abs && !src.negate);
   assert(src.type == dst.type);
   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      /* From the Haswell PRM section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *    change the register address.  The lower 5 bits of Address
       *    Immediate when added to lower 5 bits of address register gives
       *    the sub-register offset.  The upper bits of Address Immediate
       *    when added to upper bits of address register gives the register
       *    address.  Any overflow from sub-register offset is dropped."
       *
       * Fortunately, for broadcast, we never have a sub-register offset so
       * this isn't an issue.
       */
      assert(src.subnr == 0);

      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));

         /* We can only address up to limit bytes using the indirect
          * addressing immediate; account for the difference if the source
          * register is above this limit.
          */
         if (offset >= limit) {
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
            offset = offset % limit;
         }

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         if (type_sz(src.type) > 4 &&
             (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
            /* From the Cherryview PRM Vol 7. "Register Region Restrictions":
             *
             *    "When source or destination datatype is 64b or operation is
             *    integer DWord multiply, indirect addressing must not be
             *    used."
             *
             * To work around this restriction, we do two integer MOVs
             * instead of one 64-bit MOV.  Because no double value should
             * ever cross a register boundary, it's safe to use the immediate
             * offset in the indirect here to handle adding 4 bytes to the
             * offset and avoid the extra ADD to the register file.
             */
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_vec1_indirect(addr.subnr, offset),
                           BRW_REGISTER_TYPE_D));
            brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_vec1_indirect(addr.subnr, offset + 4),
                           BRW_REGISTER_TYPE_D));
         } else {
            brw_MOV(p, dst,
                    retype(brw_vec1_indirect(addr.subnr, offset), src.type));
         }
      } else {
         /* In SIMD4x2 mode the index can be either zero or one; replicate
          * it to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}
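
/* Illustrative sketch (hypothetical registers): broadcasting the float in
 * the channel selected by the UD index in g2.0 of a SIMD8 vector starting
 * at g4 into g6.0:
 *
 *    brw_broadcast(p, retype(brw_vec1_grf(6, 0), BRW_REGISTER_TYPE_F),
 *                  retype(brw_vec8_grf(4, 0), BRW_REGISTER_TYPE_F),
 *                  retype(brw_vec1_grf(2, 0), BRW_REGISTER_TYPE_UD));
 *
 * In align1 mode this becomes the SHL/MOV pair above, addressing the source
 * indirectly through a0.0.
 */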

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we want to just
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_src1(p, send, brw_imm_ud(0));
   brw_set_desc(p, send, brw_message_desc(devinfo, 2, 0, false));
   brw_inst_set_sfid(devinfo, send, sfid);
   brw_inst_set_binding_table_index(devinfo, send, surf_index);
   brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);

   brw_pop_insn_state(p);
}
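
/* Illustrative sketch (hypothetical values): accumulating the time delta
 * held in g10 into the shader-time buffer bound at binding-table index 0:
 *
 *    brw_shader_time_add(p, brw_vec8_grf(10, 0), 0);
 */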


/**
 * Emit the SEND message for a barrier
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());
   brw_set_desc(p, inst, brw_message_desc(devinfo, 1, 0, false));

   brw_inst_set_sfid(devinfo, inst, BRW_SFID_MESSAGE_GATEWAY);
   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}
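
/* Illustrative sketch (hypothetical payload register): signalling the
 * barrier with the message header in g1 and then blocking until every
 * thread in the group has arrived:
 *
 *    brw_barrier(p, brw_vec8_grf(1, 0));
 *    brw_WAIT(p);
 */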


/**
 * Emit the wait instruction for a barrier
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}

/**
 * Changes the floating point rounding mode by updating the control register
 * field defined at cr0.0[5-6] bits.  This function supports changing the
 * rounding mode to RTNE (00), RU (01), RD (10) and RTZ (11) using bitwise
 * operations.  Only RTNE and RTZ rounding are enabled in NIR.
 */
void
brw_rounding_mode(struct brw_codegen *p,
                  enum brw_rnd_mode mode)
{
   const unsigned bits = mode << BRW_CR0_RND_MODE_SHIFT;

   if (bits != BRW_CR0_RND_MODE_MASK) {
      brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
                               brw_imm_ud(~BRW_CR0_RND_MODE_MASK));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);

      /* From the Skylake PRM, Volume 7, page 760:
       *
       *    "Implementation Restriction on Register Access: When the control
       *    register is used as an explicit source and/or destination,
       *    hardware does not ensure execution pipeline coherency.  Software
       *    must set the thread control field to ‘switch’ for an instruction
       *    that uses control register as an explicit operand."
       */
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }

   if (bits) {
      brw_inst *inst = brw_OR(p, brw_cr0_reg(0), brw_cr0_reg(0),
                              brw_imm_ud(bits));
      brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
      brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
   }
}
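
/* Illustrative walk-through: brw_rounding_mode(p, BRW_RND_MODE_RTZ) gives
 * bits == 3 << BRW_CR0_RND_MODE_SHIFT == BRW_CR0_RND_MODE_MASK, so the AND
 * above is skipped (every mode bit is about to be set anyway) and a single
 * OR writes the mask into cr0.0.  Conversely, BRW_RND_MODE_RTNE (00) skips
 * the OR and only the clearing AND runs.
 */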