fc187d16736d541c998bb1fbff60035847c9cf06
[mesa.git] / src / mesa / drivers / dri / i965 / brw_eu_emit.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33 #include "brw_context.h"
34 #include "brw_defines.h"
35 #include "brw_eu.h"
36
37 #include "util/ralloc.h"
38
39 /**
40 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
41 * registers, implicitly moving the operand to a message register.
42 *
43 * On Sandybridge, this is no longer the case. This function performs the
44 * explicit move; it should be called before emitting a SEND instruction.
45 */
46 void
47 gen6_resolve_implied_move(struct brw_codegen *p,
48 struct brw_reg *src,
49 unsigned msg_reg_nr)
50 {
51 const struct brw_device_info *devinfo = p->devinfo;
52 if (devinfo->gen < 6)
53 return;
54
55 if (src->file == BRW_MESSAGE_REGISTER_FILE)
56 return;
57
58 if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
59 brw_push_insn_state(p);
60 brw_set_default_exec_size(p, BRW_EXECUTE_8);
61 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
62 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
63 brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
64 retype(*src, BRW_REGISTER_TYPE_UD));
65 brw_pop_insn_state(p);
66 }
67 *src = brw_message_reg(msg_reg_nr);
68 }
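/* A hedged usage sketch (this helper is illustrative and not part of the
 * original file; the MRF number 2 is an arbitrary assumption): callers
 * resolve the implied move first, then assemble the SEND from the
 * rewritten source.
 */
static inline void
example_resolve_before_send(struct brw_codegen *p, struct brw_reg payload)
{
   /* On Gen6+ this emits an explicit MOV into m2 and rewrites `payload`
    * to point at it; on earlier Gens it is a no-op.
    */
   gen6_resolve_implied_move(p, &payload, 2);
   /* ... fill out the SEND using `payload` as src0 ... */
}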
69
70 static void
71 gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
72 {
73 /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
74 * "The send with EOT should use register space R112-R127 for <src>. This is
75 * to enable loading of a new thread into the same slot while the message
76 * with EOT for current thread is pending dispatch."
77 *
78 * Since we're pretending to have 16 MRFs anyway, we may as well use the
79 * registers required for messages with EOT.
80 */
81 const struct brw_device_info *devinfo = p->devinfo;
82 if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
83 reg->file = BRW_GENERAL_REGISTER_FILE;
84 reg->nr += GEN7_MRF_HACK_START;
85 }
86 }
87
88 /**
89 * Convert a brw_reg_type enumeration value into the hardware representation.
90 *
91 * The hardware encoding may depend on whether the value is an immediate.
92 */
93 unsigned
94 brw_reg_type_to_hw_type(const struct brw_device_info *devinfo,
95 enum brw_reg_type type, enum brw_reg_file file)
96 {
97 if (file == BRW_IMMEDIATE_VALUE) {
98 static const int imm_hw_types[] = {
99 [BRW_REGISTER_TYPE_UD] = BRW_HW_REG_TYPE_UD,
100 [BRW_REGISTER_TYPE_D] = BRW_HW_REG_TYPE_D,
101 [BRW_REGISTER_TYPE_UW] = BRW_HW_REG_TYPE_UW,
102 [BRW_REGISTER_TYPE_W] = BRW_HW_REG_TYPE_W,
103 [BRW_REGISTER_TYPE_F] = BRW_HW_REG_TYPE_F,
104 [BRW_REGISTER_TYPE_UB] = -1,
105 [BRW_REGISTER_TYPE_B] = -1,
106 [BRW_REGISTER_TYPE_UV] = BRW_HW_REG_IMM_TYPE_UV,
107 [BRW_REGISTER_TYPE_VF] = BRW_HW_REG_IMM_TYPE_VF,
108 [BRW_REGISTER_TYPE_V] = BRW_HW_REG_IMM_TYPE_V,
109 [BRW_REGISTER_TYPE_DF] = GEN8_HW_REG_IMM_TYPE_DF,
110 [BRW_REGISTER_TYPE_HF] = GEN8_HW_REG_IMM_TYPE_HF,
111 [BRW_REGISTER_TYPE_UQ] = GEN8_HW_REG_TYPE_UQ,
112 [BRW_REGISTER_TYPE_Q] = GEN8_HW_REG_TYPE_Q,
113 };
114 assert(type < ARRAY_SIZE(imm_hw_types));
115 assert(imm_hw_types[type] != -1);
116 assert(devinfo->gen >= 8 || type < BRW_REGISTER_TYPE_DF);
117 return imm_hw_types[type];
118 } else {
119 /* Non-immediate registers */
120 static const int hw_types[] = {
121 [BRW_REGISTER_TYPE_UD] = BRW_HW_REG_TYPE_UD,
122 [BRW_REGISTER_TYPE_D] = BRW_HW_REG_TYPE_D,
123 [BRW_REGISTER_TYPE_UW] = BRW_HW_REG_TYPE_UW,
124 [BRW_REGISTER_TYPE_W] = BRW_HW_REG_TYPE_W,
125 [BRW_REGISTER_TYPE_UB] = BRW_HW_REG_NON_IMM_TYPE_UB,
126 [BRW_REGISTER_TYPE_B] = BRW_HW_REG_NON_IMM_TYPE_B,
127 [BRW_REGISTER_TYPE_F] = BRW_HW_REG_TYPE_F,
128 [BRW_REGISTER_TYPE_UV] = -1,
129 [BRW_REGISTER_TYPE_VF] = -1,
130 [BRW_REGISTER_TYPE_V] = -1,
131 [BRW_REGISTER_TYPE_DF] = GEN7_HW_REG_NON_IMM_TYPE_DF,
132 [BRW_REGISTER_TYPE_HF] = GEN8_HW_REG_NON_IMM_TYPE_HF,
133 [BRW_REGISTER_TYPE_UQ] = GEN8_HW_REG_TYPE_UQ,
134 [BRW_REGISTER_TYPE_Q] = GEN8_HW_REG_TYPE_Q,
135 };
136 assert(type < ARRAY_SIZE(hw_types));
137 assert(hw_types[type] != -1);
138 assert(devinfo->gen >= 7 || type < BRW_REGISTER_TYPE_DF);
139 assert(devinfo->gen >= 8 || type < BRW_REGISTER_TYPE_HF);
140 return hw_types[type];
141 }
142 }
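/* A quick sketch of the asymmetry (assuming some valid `devinfo`): F encodes
 * identically in both files, while packed-vector and byte types are only
 * legal on one side:
 *
 *    brw_reg_type_to_hw_type(devinfo, BRW_REGISTER_TYPE_F, file)
 *       returns BRW_HW_REG_TYPE_F for either file;
 *    BRW_REGISTER_TYPE_V  asserts unless file == BRW_IMMEDIATE_VALUE;
 *    BRW_REGISTER_TYPE_UB asserts if   file == BRW_IMMEDIATE_VALUE.
 */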
143
144 void
145 brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
146 {
147 const struct brw_device_info *devinfo = p->devinfo;
148
149 if (dest.file == BRW_MESSAGE_REGISTER_FILE)
150 assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
151 else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
152 assert(dest.nr < 128);
153
154 gen7_convert_mrf_to_grf(p, &dest);
155
156 brw_inst_set_dst_reg_file(devinfo, inst, dest.file);
157 brw_inst_set_dst_reg_type(devinfo, inst,
158 brw_reg_type_to_hw_type(devinfo, dest.type,
159 dest.file));
160 brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);
161
162 if (dest.address_mode == BRW_ADDRESS_DIRECT) {
163 brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);
164
165 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
166 brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
167 if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
168 dest.hstride = BRW_HORIZONTAL_STRIDE_1;
169 brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
170 } else {
171 brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
172 brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
173 if (dest.file == BRW_GENERAL_REGISTER_FILE ||
174 dest.file == BRW_MESSAGE_REGISTER_FILE) {
175 assert(dest.writemask != 0);
176 }
177 /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
178 * Although Dst.HorzStride is a don't care for Align16, HW needs
179 * this to be programmed as "01".
180 */
181 brw_inst_set_dst_hstride(devinfo, inst, 1);
182 }
183 } else {
184 brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);
185
186 /* These are different sizes in align1 vs align16:
187 */
188 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
189 brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
190 dest.indirect_offset);
191 if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
192 dest.hstride = BRW_HORIZONTAL_STRIDE_1;
193 brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
194 } else {
195 brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
196 dest.indirect_offset);
197 /* Even though it is ignored in da16 mode, this still needs to be set to '01'. */
198 brw_inst_set_dst_hstride(devinfo, inst, 1);
199 }
200 }
201
202 /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
203 * or 16 (SIMD16), as that's normally correct. However, when dealing with
204 * small registers, we automatically reduce it to match the register size.
205 *
206 * On platforms that support fp64, we can emit instructions with a width of
207 * 4 that need two SIMD8 registers and an exec_size of 8 or 16. In these
208 * cases we need to make sure that these instructions have their exec sizes
209 * set properly when they are emitted and we can't rely on this code to fix
210 * it.
211 */
212 bool fix_exec_size;
213 if (devinfo->gen >= 6)
214 fix_exec_size = dest.width < BRW_EXECUTE_4;
215 else
216 fix_exec_size = dest.width < BRW_EXECUTE_8;
217
218 if (fix_exec_size)
219 brw_inst_set_exec_size(devinfo, inst, dest.width);
220 }
221
222 extern int reg_type_size[];
223
224 static void
225 validate_reg(const struct brw_device_info *devinfo,
226 brw_inst *inst, struct brw_reg reg)
227 {
228 const int hstride_for_reg[] = {0, 1, 2, 4};
229 const int vstride_for_reg[] = {0, 1, 2, 4, 8, 16, 32};
230 const int width_for_reg[] = {1, 2, 4, 8, 16};
231 const int execsize_for_reg[] = {1, 2, 4, 8, 16, 32};
232 int width, hstride, vstride, execsize;
233
234 if (reg.file == BRW_IMMEDIATE_VALUE) {
235 /* 3.3.6: Region Parameters. Restriction: Immediate vectors
236 * mean the destination has to be 128-bit aligned and the
237 * destination horiz stride has to be a word.
238 */
239 if (reg.type == BRW_REGISTER_TYPE_V) {
240 assert(hstride_for_reg[brw_inst_dst_hstride(devinfo, inst)] *
241 reg_type_size[brw_inst_dst_reg_type(devinfo, inst)] == 2);
242 }
243
244 return;
245 }
246
247 if (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
248 reg.nr == BRW_ARF_NULL)
249 return;
250
251 /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
252 *
253 * "Swizzling is not allowed when an accumulator is used as an implicit
254 * source or an explicit source in an instruction."
255 */
256 if (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
257 reg.nr == BRW_ARF_ACCUMULATOR)
258 assert(reg.swizzle == BRW_SWIZZLE_XYZW);
259
260 assert(reg.hstride >= 0 && reg.hstride < ARRAY_SIZE(hstride_for_reg));
261 hstride = hstride_for_reg[reg.hstride];
262
263 if (reg.vstride == 0xf) {
264 vstride = -1;
265 } else {
266 assert(reg.vstride >= 0 && reg.vstride < ARRAY_SIZE(vstride_for_reg));
267 vstride = vstride_for_reg[reg.vstride];
268 }
269
270 assert(reg.width >= 0 && reg.width < ARRAY_SIZE(width_for_reg));
271 width = width_for_reg[reg.width];
272
273 assert(brw_inst_exec_size(devinfo, inst) >= 0 &&
274 brw_inst_exec_size(devinfo, inst) < ARRAY_SIZE(execsize_for_reg));
275 execsize = execsize_for_reg[brw_inst_exec_size(devinfo, inst)];
276
277 /* Restrictions from 3.3.10: Register Region Restrictions. */
278 /* 3. */
279 assert(execsize >= width);
280
281 /* 4. */
282 if (execsize == width && hstride != 0) {
283 assert(vstride == -1 || vstride == width * hstride);
284 }
285
286 /* 5. */
287 if (execsize == width && hstride == 0) {
288 /* no restriction on vstride. */
289 }
290
291 /* 6. */
292 if (width == 1) {
293 assert(hstride == 0);
294 }
295
296 /* 7. */
297 if (execsize == 1 && width == 1) {
298 assert(hstride == 0);
299 assert(vstride == 0);
300 }
301
302 /* 8. */
303 if (vstride == 0 && hstride == 0) {
304 assert(width == 1);
305 }
306
307 /* 10. Check destination issues. */
308 }
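/* Two concrete regions that satisfy the checks above (a sketch, using the
 * usual <vstride; width, hstride> notation): a SIMD8 row <8;8,1> passes
 * rule 4 since vstride == width * hstride (8 == 8 * 1), and a scalar
 * <0;1,0> passes rules 6 and 8 (width 1 forces hstride 0, and the all-zero
 * strides in turn require width 1).
 */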
309
310 static bool
311 is_compactable_immediate(unsigned imm)
312 {
313 /* We get the low 12 bits as-is. */
314 imm &= ~0xfff;
315
316 /* We get one bit replicated through the top 20 bits. */
317 return imm == 0 || imm == 0xfffff000;
318 }
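/* For example: 0x00000abc and 0xfffff800 are compactable (bits 31:12 are
 * all zeros or all ones), while 0x00001000 is not.
 */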
319
320 void
321 brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
322 {
323 const struct brw_device_info *devinfo = p->devinfo;
324
325 if (reg.file == BRW_MESSAGE_REGISTER_FILE)
326 assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
327 else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
328 assert(reg.nr < 128);
329
330 gen7_convert_mrf_to_grf(p, &reg);
331
332 if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
333 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
334 /* Any source modifiers or regions will be ignored, since this just
335 * identifies the MRF/GRF to start reading the message contents from.
336 * Check for some likely failures.
337 */
338 assert(!reg.negate);
339 assert(!reg.abs);
340 assert(reg.address_mode == BRW_ADDRESS_DIRECT);
341 }
342
343 validate_reg(devinfo, inst, reg);
344
345 brw_inst_set_src0_reg_file(devinfo, inst, reg.file);
346 brw_inst_set_src0_reg_type(devinfo, inst,
347 brw_reg_type_to_hw_type(devinfo, reg.type, reg.file));
348 brw_inst_set_src0_abs(devinfo, inst, reg.abs);
349 brw_inst_set_src0_negate(devinfo, inst, reg.negate);
350 brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);
351
352 if (reg.file == BRW_IMMEDIATE_VALUE) {
353 if (reg.type == BRW_REGISTER_TYPE_DF ||
354 brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
355 brw_inst_set_imm_df(devinfo, inst, reg.df);
356 else
357 brw_inst_set_imm_ud(devinfo, inst, reg.ud);
358
359 /* The Bspec's section titled "Non-present Operands" claims that if src0
360 * is an immediate, src1's type must be the same as that of src0.
361 *
362 * The SNB+ DataTypeIndex instruction compaction tables contain mappings
363 * that do not follow this rule. E.g., from the IVB/HSW table:
364 *
365 * DataTypeIndex 18-Bit Mapping Mapped Meaning
366 * 3 001000001011111101 r:f | i:vf | a:ud | <1> | dir |
367 *
368 * And from the SNB table:
369 *
370 * DataTypeIndex 18-Bit Mapping Mapped Meaning
371 * 8 001000000111101100 a:w | i:w | a:ud | <1> | dir |
372 *
373 * Neither of these cause warnings from the simulator when used,
374 * compacted or otherwise. In fact, all compaction mappings that have an
375 * immediate in src0 use a:ud for src1.
376 *
377 * The GM45 instruction compaction tables do not contain mapped meanings
378 * so it's not clear whether it has the restriction. We'll assume it was
379 * lifted on SNB. (FINISHME: decode the GM45 tables and check.)
380 *
381 * Don't do any of this for 64-bit immediates, since the src1 fields
382 * overlap with the immediate and setting them would overwrite the
383 * immediate we set.
384 */
385 if (type_sz(reg.type) < 8) {
386 brw_inst_set_src1_reg_file(devinfo, inst,
387 BRW_ARCHITECTURE_REGISTER_FILE);
388 if (devinfo->gen < 6) {
389 brw_inst_set_src1_reg_type(devinfo, inst,
390 brw_inst_src0_reg_type(devinfo, inst));
391 } else {
392 brw_inst_set_src1_reg_type(devinfo, inst, BRW_HW_REG_TYPE_UD);
393 }
394 }
395
396 /* Compacted instructions only have 12 bits for immediate values, plus one
397 * bit replicated through the other 20. Presumably the hardware engineers realized
398 * that the only useful floating-point value that could be represented
399 * in this format is 0.0, which can also be represented as a VF-typed
400 * immediate, so they gave us the previously mentioned mapping on IVB+.
401 *
402 * Strangely, we do have a mapping for imm:f in src1, so we don't need
403 * to do this there.
404 *
405 * If we see a 0.0:F, change the type to VF so that it can be compacted.
406 */
407 if (brw_inst_imm_ud(devinfo, inst) == 0x0 &&
408 brw_inst_src0_reg_type(devinfo, inst) == BRW_HW_REG_TYPE_F) {
409 brw_inst_set_src0_reg_type(devinfo, inst, BRW_HW_REG_IMM_TYPE_VF);
410 }
411
412 /* There are no mappings for dst:d | i:d, so if the immediate is suitable,
413 * set the types to :UD so the instruction can be compacted.
414 */
415 if (is_compactable_immediate(brw_inst_imm_ud(devinfo, inst)) &&
416 brw_inst_cond_modifier(devinfo, inst) == BRW_CONDITIONAL_NONE &&
417 brw_inst_src0_reg_type(devinfo, inst) == BRW_HW_REG_TYPE_D &&
418 brw_inst_dst_reg_type(devinfo, inst) == BRW_HW_REG_TYPE_D) {
419 brw_inst_set_src0_reg_type(devinfo, inst, BRW_HW_REG_TYPE_UD);
420 brw_inst_set_dst_reg_type(devinfo, inst, BRW_HW_REG_TYPE_UD);
421 }
422 } else {
423 if (reg.address_mode == BRW_ADDRESS_DIRECT) {
424 brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
425 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
426 brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
427 } else {
428 brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
429 }
430 } else {
431 brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);
432
433 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
434 brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
435 } else {
436 brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
437 }
438 }
439
440 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
441 if (reg.width == BRW_WIDTH_1 &&
442 brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
443 brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
444 brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
445 brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
446 } else {
447 brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
448 brw_inst_set_src0_width(devinfo, inst, reg.width);
449 brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
450 }
451 } else {
452 brw_inst_set_src0_da16_swiz_x(devinfo, inst,
453 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
454 brw_inst_set_src0_da16_swiz_y(devinfo, inst,
455 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
456 brw_inst_set_src0_da16_swiz_z(devinfo, inst,
457 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
458 brw_inst_set_src0_da16_swiz_w(devinfo, inst,
459 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));
460
461 /* This is an oddity resulting from using the same register
462 * descriptions in align_16 mode as in align_1 mode:
463 */
464 if (reg.vstride == BRW_VERTICAL_STRIDE_8)
465 brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
466 else
467 brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
468 }
469 }
470 }
471
472
473 void
474 brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
475 {
476 const struct brw_device_info *devinfo = p->devinfo;
477
478 if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
479 assert(reg.nr < 128);
480
481 /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
482 *
483 * "Accumulator registers may be accessed explicitly as src0
484 * operands only."
485 */
486 assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
487 reg.nr != BRW_ARF_ACCUMULATOR);
488
489 gen7_convert_mrf_to_grf(p, &reg);
490 assert(reg.file != BRW_MESSAGE_REGISTER_FILE);
491
492 validate_reg(devinfo, inst, reg);
493
494 brw_inst_set_src1_reg_file(devinfo, inst, reg.file);
495 brw_inst_set_src1_reg_type(devinfo, inst,
496 brw_reg_type_to_hw_type(devinfo, reg.type, reg.file));
497 brw_inst_set_src1_abs(devinfo, inst, reg.abs);
498 brw_inst_set_src1_negate(devinfo, inst, reg.negate);
499
500 /* Only src1 can be immediate in two-argument instructions.
501 */
502 assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);
503
504 if (reg.file == BRW_IMMEDIATE_VALUE) {
505 /* two-argument instructions can only use 32-bit immediates */
506 assert(type_sz(reg.type) < 8);
507 brw_inst_set_imm_ud(devinfo, inst, reg.ud);
508 } else {
509 /* This is a hardware restriction, which may or may not be lifted
510 * in the future:
511 */
512 assert (reg.address_mode == BRW_ADDRESS_DIRECT);
513 /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */
514
515 brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
516 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
517 brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
518 } else {
519 brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
520 }
521
522 if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
523 if (reg.width == BRW_WIDTH_1 &&
524 brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
525 brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
526 brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
527 brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
528 } else {
529 brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
530 brw_inst_set_src1_width(devinfo, inst, reg.width);
531 brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
532 }
533 } else {
534 brw_inst_set_src1_da16_swiz_x(devinfo, inst,
535 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
536 brw_inst_set_src1_da16_swiz_y(devinfo, inst,
537 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
538 brw_inst_set_src1_da16_swiz_z(devinfo, inst,
539 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
540 brw_inst_set_src1_da16_swiz_w(devinfo, inst,
541 BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));
542
543 /* This is an oddity resulting from using the same register
544 * descriptions in align_16 mode as in align_1 mode:
545 */
546 if (reg.vstride == BRW_VERTICAL_STRIDE_8)
547 brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
548 else
549 brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
550 }
551 }
552 }
553
554 /**
555 * Set the Message Descriptor and Extended Message Descriptor fields
556 * for SEND messages.
557 *
558 * \note This zeroes out the Function Control bits, so it must be called
559 * \b before filling out any message-specific data. Callers can
560 * choose not to fill in irrelevant bits; they will be zero.
561 */
562 void
563 brw_set_message_descriptor(struct brw_codegen *p,
564 brw_inst *inst,
565 enum brw_message_target sfid,
566 unsigned msg_length,
567 unsigned response_length,
568 bool header_present,
569 bool end_of_thread)
570 {
571 const struct brw_device_info *devinfo = p->devinfo;
572
573 brw_set_src1(p, inst, brw_imm_d(0));
574
575 /* For indirect sends, `inst` will not be the SEND/SENDC instruction
576 * itself; instead, it will be a MOV/OR into the address register.
577 *
578 * In this case, we avoid setting the extended message descriptor bits,
579 * since they go on the later SEND/SENDC instead; if set here, they
580 * would clobber the conditional-mod bits.
581 */
582 unsigned opcode = brw_inst_opcode(devinfo, inst);
583 if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC) {
584 brw_inst_set_sfid(devinfo, inst, sfid);
585 }
586
587 brw_inst_set_mlen(devinfo, inst, msg_length);
588 brw_inst_set_rlen(devinfo, inst, response_length);
589 brw_inst_set_eot(devinfo, inst, end_of_thread);
590
591 if (devinfo->gen >= 5) {
592 brw_inst_set_header_present(devinfo, inst, header_present);
593 }
594 }
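/* Call-order sketch (hypothetical SFID and lengths): because the
 * brw_set_src1(..., brw_imm_d(0)) above wipes the whole descriptor, the
 * generic fields must be set before any message-specific ones:
 *
 *    brw_set_message_descriptor(p, insn, BRW_SFID_URB, 1, 0, true, false);
 *    brw_inst_set_urb_opcode(devinfo, insn, 1);   // specifics afterwards
 */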
595
596 static void brw_set_math_message( struct brw_codegen *p,
597 brw_inst *inst,
598 unsigned function,
599 unsigned integer_type,
600 bool low_precision,
601 unsigned dataType )
602 {
603 const struct brw_device_info *devinfo = p->devinfo;
604 unsigned msg_length;
605 unsigned response_length;
606
607 /* Infer message length from the function */
608 switch (function) {
609 case BRW_MATH_FUNCTION_POW:
610 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
611 case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
612 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
613 msg_length = 2;
614 break;
615 default:
616 msg_length = 1;
617 break;
618 }
619
620 /* Infer response length from the function */
621 switch (function) {
622 case BRW_MATH_FUNCTION_SINCOS:
623 case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
624 response_length = 2;
625 break;
626 default:
627 response_length = 1;
628 break;
629 }
630
631
632 brw_set_message_descriptor(p, inst, BRW_SFID_MATH,
633 msg_length, response_length, false, false);
634 brw_inst_set_math_msg_function(devinfo, inst, function);
635 brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
636 brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
637 brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
638 brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
639 brw_inst_set_saturate(devinfo, inst, 0);
640 }
641
642
643 static void brw_set_ff_sync_message(struct brw_codegen *p,
644 brw_inst *insn,
645 bool allocate,
646 unsigned response_length,
647 bool end_of_thread)
648 {
649 const struct brw_device_info *devinfo = p->devinfo;
650
651 brw_set_message_descriptor(p, insn, BRW_SFID_URB,
652 1, response_length, true, end_of_thread);
653 brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
654 brw_inst_set_urb_allocate(devinfo, insn, allocate);
655 /* The following fields are not used by FF_SYNC: */
656 brw_inst_set_urb_global_offset(devinfo, insn, 0);
657 brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
658 brw_inst_set_urb_used(devinfo, insn, 0);
659 brw_inst_set_urb_complete(devinfo, insn, 0);
660 }
661
662 static void brw_set_urb_message( struct brw_codegen *p,
663 brw_inst *insn,
664 enum brw_urb_write_flags flags,
665 unsigned msg_length,
666 unsigned response_length,
667 unsigned offset,
668 unsigned swizzle_control )
669 {
670 const struct brw_device_info *devinfo = p->devinfo;
671
672 assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
673 assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
674 assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
675
676 brw_set_message_descriptor(p, insn, BRW_SFID_URB,
677 msg_length, response_length, true,
678 flags & BRW_URB_WRITE_EOT);
679
680 if (flags & BRW_URB_WRITE_OWORD) {
681 assert(msg_length == 2); /* header + one OWORD of data */
682 brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
683 } else {
684 brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
685 }
686
687 brw_inst_set_urb_global_offset(devinfo, insn, offset);
688 brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);
689
690 if (devinfo->gen < 8) {
691 brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
692 }
693
694 if (devinfo->gen < 7) {
695 brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
696 brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
697 } else {
698 brw_inst_set_urb_per_slot_offset(devinfo, insn,
699 !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
700 }
701 }
702
703 void
704 brw_set_dp_write_message(struct brw_codegen *p,
705 brw_inst *insn,
706 unsigned binding_table_index,
707 unsigned msg_control,
708 unsigned msg_type,
709 unsigned msg_length,
710 bool header_present,
711 unsigned last_render_target,
712 unsigned response_length,
713 unsigned end_of_thread,
714 unsigned send_commit_msg)
715 {
716 const struct brw_device_info *devinfo = p->devinfo;
717 unsigned sfid;
718
719 if (devinfo->gen >= 7) {
720 /* Use the Render Cache for RT writes; otherwise use the Data Cache */
721 if (msg_type == GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE)
722 sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
723 else
724 sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
725 } else if (devinfo->gen == 6) {
726 /* Use the render cache for all write messages. */
727 sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
728 } else {
729 sfid = BRW_SFID_DATAPORT_WRITE;
730 }
731
732 brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
733 header_present, end_of_thread);
734
735 brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
736 brw_inst_set_dp_write_msg_type(devinfo, insn, msg_type);
737 brw_inst_set_dp_write_msg_control(devinfo, insn, msg_control);
738 brw_inst_set_rt_last(devinfo, insn, last_render_target);
739 if (devinfo->gen < 7) {
740 brw_inst_set_dp_write_commit(devinfo, insn, send_commit_msg);
741 }
742 }
743
744 void
745 brw_set_dp_read_message(struct brw_codegen *p,
746 brw_inst *insn,
747 unsigned binding_table_index,
748 unsigned msg_control,
749 unsigned msg_type,
750 unsigned target_cache,
751 unsigned msg_length,
752 bool header_present,
753 unsigned response_length)
754 {
755 const struct brw_device_info *devinfo = p->devinfo;
756 unsigned sfid;
757
758 if (devinfo->gen >= 7) {
759 sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
760 } else if (devinfo->gen == 6) {
761 if (target_cache == BRW_DATAPORT_READ_TARGET_RENDER_CACHE)
762 sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
763 else
764 sfid = GEN6_SFID_DATAPORT_SAMPLER_CACHE;
765 } else {
766 sfid = BRW_SFID_DATAPORT_READ;
767 }
768
769 brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
770 header_present, false);
771
772 brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
773 brw_inst_set_dp_read_msg_type(devinfo, insn, msg_type);
774 brw_inst_set_dp_read_msg_control(devinfo, insn, msg_control);
775 if (devinfo->gen < 6)
776 brw_inst_set_dp_read_target_cache(devinfo, insn, target_cache);
777 }
778
779 void
780 brw_set_sampler_message(struct brw_codegen *p,
781 brw_inst *inst,
782 unsigned binding_table_index,
783 unsigned sampler,
784 unsigned msg_type,
785 unsigned response_length,
786 unsigned msg_length,
787 unsigned header_present,
788 unsigned simd_mode,
789 unsigned return_format)
790 {
791 const struct brw_device_info *devinfo = p->devinfo;
792
793 brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER, msg_length,
794 response_length, header_present, false);
795
796 brw_inst_set_binding_table_index(devinfo, inst, binding_table_index);
797 brw_inst_set_sampler(devinfo, inst, sampler);
798 brw_inst_set_sampler_msg_type(devinfo, inst, msg_type);
799 if (devinfo->gen >= 5) {
800 brw_inst_set_sampler_simd_mode(devinfo, inst, simd_mode);
801 } else if (devinfo->gen == 4 && !devinfo->is_g4x) {
802 brw_inst_set_sampler_return_format(devinfo, inst, return_format);
803 }
804 }
805
806 static void
807 gen7_set_dp_scratch_message(struct brw_codegen *p,
808 brw_inst *inst,
809 bool write,
810 bool dword,
811 bool invalidate_after_read,
812 unsigned num_regs,
813 unsigned addr_offset,
814 unsigned mlen,
815 unsigned rlen,
816 bool header_present)
817 {
818 const struct brw_device_info *devinfo = p->devinfo;
819 assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
820 (devinfo->gen >= 8 && num_regs == 8));
821 const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
822 num_regs - 1);
823
824 brw_set_message_descriptor(p, inst, GEN7_SFID_DATAPORT_DATA_CACHE,
825 mlen, rlen, header_present, false);
826 brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
827 brw_inst_set_scratch_read_write(devinfo, inst, write);
828 brw_inst_set_scratch_type(devinfo, inst, dword);
829 brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
830 brw_inst_set_scratch_block_size(devinfo, inst, block_size);
831 brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
832 }
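/* Block-size encoding sketch: for num_regs of 1/2/4 the field holds 0/1/3
 * (num_regs - 1) on Gen7, but 0/1/2 (log2) on Gen8+, where 8 registers
 * encode as 3.
 */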
833
834 #define next_insn brw_next_insn
835 brw_inst *
836 brw_next_insn(struct brw_codegen *p, unsigned opcode)
837 {
838 const struct brw_device_info *devinfo = p->devinfo;
839 brw_inst *insn;
840
841 if (p->nr_insn + 1 > p->store_size) {
842 p->store_size <<= 1;
843 p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
844 }
845
846 p->next_insn_offset += 16;
847 insn = &p->store[p->nr_insn++];
848 memcpy(insn, p->current, sizeof(*insn));
849
850 brw_inst_set_opcode(devinfo, insn, opcode);
851 return insn;
852 }
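/* Because the store may be reralloc'd (and thus move) on any call, code
 * that needs a long-lived reference keeps an index instead of a pointer --
 * see push_if_stack() below. A sketch of the invariant:
 *
 *    int idx = insn - p->store;          // survives reallocation
 *    ... more next_insn() calls ...
 *    brw_inst *again = &p->store[idx];   // recompute after emitting
 */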
853
854 static brw_inst *
855 brw_alu1(struct brw_codegen *p, unsigned opcode,
856 struct brw_reg dest, struct brw_reg src)
857 {
858 brw_inst *insn = next_insn(p, opcode);
859 brw_set_dest(p, insn, dest);
860 brw_set_src0(p, insn, src);
861 return insn;
862 }
863
864 static brw_inst *
865 brw_alu2(struct brw_codegen *p, unsigned opcode,
866 struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
867 {
868 /* 64-bit immediates are only supported on 1-src instructions */
869 assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
870 assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);
871
872 brw_inst *insn = next_insn(p, opcode);
873 brw_set_dest(p, insn, dest);
874 brw_set_src0(p, insn, src0);
875 brw_set_src1(p, insn, src1);
876 return insn;
877 }
878
879 static int
880 get_3src_subreg_nr(struct brw_reg reg)
881 {
882 /* Normally, SubRegNum is in bytes (0..31). However, 3-src instructions
883 * use 32-bit units (components 0..7). Since they only support F/D/UD
884 * types, this doesn't lose any flexibility, but uses fewer bits.
885 */
886 return reg.subnr / 4;
887 }
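/* E.g. a 32-bit component at byte offset 16 is encoded as subreg 16 / 4 == 4. */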
888
889 static brw_inst *
890 brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
891 struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
892 {
893 const struct brw_device_info *devinfo = p->devinfo;
894 brw_inst *inst = next_insn(p, opcode);
895
896 gen7_convert_mrf_to_grf(p, &dest);
897
898 assert(brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16);
899
900 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
901 dest.file == BRW_MESSAGE_REGISTER_FILE);
902 assert(dest.nr < 128);
903 assert(dest.address_mode == BRW_ADDRESS_DIRECT);
904 assert(dest.type == BRW_REGISTER_TYPE_F ||
905 dest.type == BRW_REGISTER_TYPE_DF ||
906 dest.type == BRW_REGISTER_TYPE_D ||
907 dest.type == BRW_REGISTER_TYPE_UD);
908 if (devinfo->gen == 6) {
909 brw_inst_set_3src_dst_reg_file(devinfo, inst,
910 dest.file == BRW_MESSAGE_REGISTER_FILE);
911 }
912 brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
913 brw_inst_set_3src_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
914 brw_inst_set_3src_dst_writemask(devinfo, inst, dest.writemask);
915
916 assert(src0.file == BRW_GENERAL_REGISTER_FILE);
917 assert(src0.address_mode == BRW_ADDRESS_DIRECT);
918 assert(src0.nr < 128);
919 brw_inst_set_3src_src0_swizzle(devinfo, inst, src0.swizzle);
920 brw_inst_set_3src_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
921 brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
922 brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
923 brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
924 brw_inst_set_3src_src0_rep_ctrl(devinfo, inst,
925 src0.vstride == BRW_VERTICAL_STRIDE_0);
926
927 assert(src1.file == BRW_GENERAL_REGISTER_FILE);
928 assert(src1.address_mode == BRW_ADDRESS_DIRECT);
929 assert(src1.nr < 128);
930 brw_inst_set_3src_src1_swizzle(devinfo, inst, src1.swizzle);
931 brw_inst_set_3src_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
932 brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
933 brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
934 brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
935 brw_inst_set_3src_src1_rep_ctrl(devinfo, inst,
936 src1.vstride == BRW_VERTICAL_STRIDE_0);
937
938 assert(src2.file == BRW_GENERAL_REGISTER_FILE);
939 assert(src2.address_mode == BRW_ADDRESS_DIRECT);
940 assert(src2.nr < 128);
941 brw_inst_set_3src_src2_swizzle(devinfo, inst, src2.swizzle);
942 brw_inst_set_3src_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
943 brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
944 brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
945 brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
946 brw_inst_set_3src_src2_rep_ctrl(devinfo, inst,
947 src2.vstride == BRW_VERTICAL_STRIDE_0);
948
949 if (devinfo->gen >= 7) {
950 /* Set both the source and destination types based on dest.type,
951 * ignoring the source register types. The MAD and LRP emitters ensure
952 * that all four types are float. The BFE and BFI2 emitters, however,
953 * may send us mixed D and UD types and want us to ignore that and use
954 * the destination type.
955 */
956 switch (dest.type) {
957 case BRW_REGISTER_TYPE_F:
958 brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_F);
959 brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_F);
960 break;
961 case BRW_REGISTER_TYPE_DF:
962 brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_DF);
963 brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_DF);
964 break;
965 case BRW_REGISTER_TYPE_D:
966 brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_D);
967 brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_D);
968 break;
969 case BRW_REGISTER_TYPE_UD:
970 brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_UD);
971 brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_UD);
972 break;
973 default:
974 unreachable("not reached");
975 }
976 }
977
978 return inst;
979 }
980
981
982 /***********************************************************************
983 * Convenience routines.
984 */
985 #define ALU1(OP) \
986 brw_inst *brw_##OP(struct brw_codegen *p, \
987 struct brw_reg dest, \
988 struct brw_reg src0) \
989 { \
990 return brw_alu1(p, BRW_OPCODE_##OP, dest, src0); \
991 }
992
993 #define ALU2(OP) \
994 brw_inst *brw_##OP(struct brw_codegen *p, \
995 struct brw_reg dest, \
996 struct brw_reg src0, \
997 struct brw_reg src1) \
998 { \
999 return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1); \
1000 }
1001
1002 #define ALU3(OP) \
1003 brw_inst *brw_##OP(struct brw_codegen *p, \
1004 struct brw_reg dest, \
1005 struct brw_reg src0, \
1006 struct brw_reg src1, \
1007 struct brw_reg src2) \
1008 { \
1009 return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
1010 }
1011
1012 #define ALU3F(OP) \
1013 brw_inst *brw_##OP(struct brw_codegen *p, \
1014 struct brw_reg dest, \
1015 struct brw_reg src0, \
1016 struct brw_reg src1, \
1017 struct brw_reg src2) \
1018 { \
1019 assert(dest.type == BRW_REGISTER_TYPE_F || \
1020 dest.type == BRW_REGISTER_TYPE_DF); \
1021 if (dest.type == BRW_REGISTER_TYPE_F) { \
1022 assert(src0.type == BRW_REGISTER_TYPE_F); \
1023 assert(src1.type == BRW_REGISTER_TYPE_F); \
1024 assert(src2.type == BRW_REGISTER_TYPE_F); \
1025 } else if (dest.type == BRW_REGISTER_TYPE_DF) { \
1026 assert(src0.type == BRW_REGISTER_TYPE_DF); \
1027 assert(src1.type == BRW_REGISTER_TYPE_DF); \
1028 assert(src2.type == BRW_REGISTER_TYPE_DF); \
1029 } \
1030 return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
1031 }
1032
1033 /* Rounding operations (other than RNDD) require two instructions - the first
1034 * stores a rounded value (possibly the wrong way) in the dest register, but
1035 * also sets a per-channel "increment bit" in the flag register. A predicated
1036 * add of 1.0 fixes dest to contain the desired result.
1037 *
1038 * Sandybridge and later appear to round correctly without an ADD.
1039 */
1040 #define ROUND(OP) \
1041 void brw_##OP(struct brw_codegen *p, \
1042 struct brw_reg dest, \
1043 struct brw_reg src) \
1044 { \
1045 const struct brw_device_info *devinfo = p->devinfo; \
1046 brw_inst *rnd, *add; \
1047 rnd = next_insn(p, BRW_OPCODE_##OP); \
1048 brw_set_dest(p, rnd, dest); \
1049 brw_set_src0(p, rnd, src); \
1050 \
1051 if (devinfo->gen < 6) { \
1052 /* turn on round-increments */ \
1053 brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R); \
1054 add = brw_ADD(p, dest, dest, brw_imm_f(1.0f)); \
1055 brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
1056 } \
1057 }
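/* On Gen4/5, brw_RNDZ() therefore expands to a two-instruction sequence
 * along these lines (a sketch in assembly-ish notation):
 *
 *    rndz.r.f0  dst, src       // flag holds per-channel increment bits
 *    (+f0) add  dst, dst, 1.0F
 *
 * On Gen6+ only the first instruction is emitted.
 */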
1058
1059
1060 ALU1(MOV)
1061 ALU2(SEL)
1062 ALU1(NOT)
1063 ALU2(AND)
1064 ALU2(OR)
1065 ALU2(XOR)
1066 ALU2(SHR)
1067 ALU2(SHL)
1068 ALU1(DIM)
1069 ALU2(ASR)
1070 ALU1(FRC)
1071 ALU1(RNDD)
1072 ALU2(MAC)
1073 ALU2(MACH)
1074 ALU1(LZD)
1075 ALU2(DP4)
1076 ALU2(DPH)
1077 ALU2(DP3)
1078 ALU2(DP2)
1079 ALU3F(MAD)
1080 ALU3F(LRP)
1081 ALU1(BFREV)
1082 ALU3(BFE)
1083 ALU2(BFI1)
1084 ALU3(BFI2)
1085 ALU1(FBH)
1086 ALU1(FBL)
1087 ALU1(CBIT)
1088 ALU2(ADDC)
1089 ALU2(SUBB)
1090
1091 ROUND(RNDZ)
1092 ROUND(RNDE)
1093
1094
1095 brw_inst *
1096 brw_ADD(struct brw_codegen *p, struct brw_reg dest,
1097 struct brw_reg src0, struct brw_reg src1)
1098 {
1099 /* 6.2.2: add */
1100 if (src0.type == BRW_REGISTER_TYPE_F ||
1101 (src0.file == BRW_IMMEDIATE_VALUE &&
1102 src0.type == BRW_REGISTER_TYPE_VF)) {
1103 assert(src1.type != BRW_REGISTER_TYPE_UD);
1104 assert(src1.type != BRW_REGISTER_TYPE_D);
1105 }
1106
1107 if (src1.type == BRW_REGISTER_TYPE_F ||
1108 (src1.file == BRW_IMMEDIATE_VALUE &&
1109 src1.type == BRW_REGISTER_TYPE_VF)) {
1110 assert(src0.type != BRW_REGISTER_TYPE_UD);
1111 assert(src0.type != BRW_REGISTER_TYPE_D);
1112 }
1113
1114 return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
1115 }
1116
1117 brw_inst *
1118 brw_AVG(struct brw_codegen *p, struct brw_reg dest,
1119 struct brw_reg src0, struct brw_reg src1)
1120 {
1121 assert(dest.type == src0.type);
1122 assert(src0.type == src1.type);
1123 switch (src0.type) {
1124 case BRW_REGISTER_TYPE_B:
1125 case BRW_REGISTER_TYPE_UB:
1126 case BRW_REGISTER_TYPE_W:
1127 case BRW_REGISTER_TYPE_UW:
1128 case BRW_REGISTER_TYPE_D:
1129 case BRW_REGISTER_TYPE_UD:
1130 break;
1131 default:
1132 unreachable("Bad type for brw_AVG");
1133 }
1134
1135 return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
1136 }
1137
1138 brw_inst *
1139 brw_MUL(struct brw_codegen *p, struct brw_reg dest,
1140 struct brw_reg src0, struct brw_reg src1)
1141 {
1142 /* 6.32.38: mul */
1143 if (src0.type == BRW_REGISTER_TYPE_D ||
1144 src0.type == BRW_REGISTER_TYPE_UD ||
1145 src1.type == BRW_REGISTER_TYPE_D ||
1146 src1.type == BRW_REGISTER_TYPE_UD) {
1147 assert(dest.type != BRW_REGISTER_TYPE_F);
1148 }
1149
1150 if (src0.type == BRW_REGISTER_TYPE_F ||
1151 (src0.file == BRW_IMMEDIATE_VALUE &&
1152 src0.type == BRW_REGISTER_TYPE_VF)) {
1153 assert(src1.type != BRW_REGISTER_TYPE_UD);
1154 assert(src1.type != BRW_REGISTER_TYPE_D);
1155 }
1156
1157 if (src1.type == BRW_REGISTER_TYPE_F ||
1158 (src1.file == BRW_IMMEDIATE_VALUE &&
1159 src1.type == BRW_REGISTER_TYPE_VF)) {
1160 assert(src0.type != BRW_REGISTER_TYPE_UD);
1161 assert(src0.type != BRW_REGISTER_TYPE_D);
1162 }
1163
1164 assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
1165 src0.nr != BRW_ARF_ACCUMULATOR);
1166 assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
1167 src1.nr != BRW_ARF_ACCUMULATOR);
1168
1169 return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
1170 }
1171
1172 brw_inst *
1173 brw_LINE(struct brw_codegen *p, struct brw_reg dest,
1174 struct brw_reg src0, struct brw_reg src1)
1175 {
1176 src0.vstride = BRW_VERTICAL_STRIDE_0;
1177 src0.width = BRW_WIDTH_1;
1178 src0.hstride = BRW_HORIZONTAL_STRIDE_0;
1179 return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
1180 }
1181
1182 brw_inst *
1183 brw_PLN(struct brw_codegen *p, struct brw_reg dest,
1184 struct brw_reg src0, struct brw_reg src1)
1185 {
1186 src0.vstride = BRW_VERTICAL_STRIDE_0;
1187 src0.width = BRW_WIDTH_1;
1188 src0.hstride = BRW_HORIZONTAL_STRIDE_0;
1189 src1.vstride = BRW_VERTICAL_STRIDE_8;
1190 src1.width = BRW_WIDTH_8;
1191 src1.hstride = BRW_HORIZONTAL_STRIDE_1;
1192 return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
1193 }
1194
1195 brw_inst *
1196 brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
1197 {
1198 const struct brw_device_info *devinfo = p->devinfo;
1199 const bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;
1200 /* The F32TO16 instruction doesn't support 32-bit destination types in
1201 * Align1 mode, and neither does the Gen8 implementation in terms of a
1202 * converting MOV. Gen7 does zero out the high 16 bits in Align16 mode as
1203 * an undocumented feature.
1204 */
1205 const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
1206 (!align16 || devinfo->gen >= 8));
1207 brw_inst *inst;
1208
1209 if (align16) {
1210 assert(dst.type == BRW_REGISTER_TYPE_UD);
1211 } else {
1212 assert(dst.type == BRW_REGISTER_TYPE_UD ||
1213 dst.type == BRW_REGISTER_TYPE_W ||
1214 dst.type == BRW_REGISTER_TYPE_UW ||
1215 dst.type == BRW_REGISTER_TYPE_HF);
1216 }
1217
1218 brw_push_insn_state(p);
1219
1220 if (needs_zero_fill) {
1221 brw_set_default_access_mode(p, BRW_ALIGN_1);
1222 dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
1223 }
1224
1225 if (devinfo->gen >= 8) {
1226 inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
1227 } else {
1228 assert(devinfo->gen == 7);
1229 inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
1230 }
1231
1232 if (needs_zero_fill) {
1233 brw_inst_set_no_dd_clear(devinfo, inst, true);
1234 inst = brw_MOV(p, suboffset(dst, 1), brw_imm_ud(0u));
1235 brw_inst_set_no_dd_check(devinfo, inst, true);
1236 }
1237
1238 brw_pop_insn_state(p);
1239 return inst;
1240 }
1241
1242 brw_inst *
1243 brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
1244 {
1245 const struct brw_device_info *devinfo = p->devinfo;
1246 bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;
1247
1248 if (align16) {
1249 assert(src.type == BRW_REGISTER_TYPE_UD);
1250 } else {
1251 /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
1252 *
1253 * Because this instruction does not have a 16-bit floating-point
1254 * type, the source data type must be Word (W). The destination type
1255 * must be F (Float).
1256 */
1257 if (src.type == BRW_REGISTER_TYPE_UD)
1258 src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);
1259
1260 assert(src.type == BRW_REGISTER_TYPE_W ||
1261 src.type == BRW_REGISTER_TYPE_UW ||
1262 src.type == BRW_REGISTER_TYPE_HF);
1263 }
1264
1265 if (devinfo->gen >= 8) {
1266 return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
1267 } else {
1268 assert(devinfo->gen == 7);
1269 return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
1270 }
1271 }
1272
1273
1274 void brw_NOP(struct brw_codegen *p)
1275 {
1276 brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
1277 brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_1);
1278 brw_set_dest(p, insn, retype(brw_vec1_grf(0,0), BRW_REGISTER_TYPE_UD));
1279 brw_set_src0(p, insn, retype(brw_vec1_grf(0,0), BRW_REGISTER_TYPE_UD));
1280 brw_set_src1(p, insn, brw_imm_ud(0x0));
1281 }
1282
1283
1284
1285
1286
1287 /***********************************************************************
1288 * Comparisons, if/else/endif
1289 */
1290
1291 brw_inst *
1292 brw_JMPI(struct brw_codegen *p, struct brw_reg index,
1293 unsigned predicate_control)
1294 {
1295 const struct brw_device_info *devinfo = p->devinfo;
1296 struct brw_reg ip = brw_ip_reg();
1297 brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);
1298
1299 brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_2);
1300 brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
1301 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
1302 brw_inst_set_pred_control(devinfo, inst, predicate_control);
1303
1304 return inst;
1305 }
1306
1307 static void
1308 push_if_stack(struct brw_codegen *p, brw_inst *inst)
1309 {
1310 p->if_stack[p->if_stack_depth] = inst - p->store;
1311
1312 p->if_stack_depth++;
1313 if (p->if_stack_array_size <= p->if_stack_depth) {
1314 p->if_stack_array_size *= 2;
1315 p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
1316 p->if_stack_array_size);
1317 }
1318 }
1319
1320 static brw_inst *
1321 pop_if_stack(struct brw_codegen *p)
1322 {
1323 p->if_stack_depth--;
1324 return &p->store[p->if_stack[p->if_stack_depth]];
1325 }
1326
1327 static void
1328 push_loop_stack(struct brw_codegen *p, brw_inst *inst)
1329 {
1330 if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
1331 p->loop_stack_array_size *= 2;
1332 p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
1333 p->loop_stack_array_size);
1334 p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
1335 p->loop_stack_array_size);
1336 }
1337
1338 p->loop_stack[p->loop_stack_depth] = inst - p->store;
1339 p->loop_stack_depth++;
1340 p->if_depth_in_loop[p->loop_stack_depth] = 0;
1341 }
1342
1343 static brw_inst *
1344 get_inner_do_insn(struct brw_codegen *p)
1345 {
1346 return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
1347 }
1348
1349 /* EU takes the value from the flag register and pushes it onto some
1350 * sort of a stack (presumably merging with any flag value already on
1351 * the stack). Within an if block, the flags at the top of the stack
1352 * control execution on each channel of the unit, e.g. on each of the
1353 * 16 pixel values in our wm programs.
1354 *
1355 * When the matching 'else' instruction is reached (presumably by
1356 * countdown of the instruction count patched in by our ELSE/ENDIF
1357 * functions), the relevant flags are inverted.
1358 *
1359 * When the matching 'endif' instruction is reached, the flags are
1360 * popped off. If the stack is now empty, normal execution resumes.
1361 */
1362 brw_inst *
1363 brw_IF(struct brw_codegen *p, unsigned execute_size)
1364 {
1365 const struct brw_device_info *devinfo = p->devinfo;
1366 brw_inst *insn;
1367
1368 insn = next_insn(p, BRW_OPCODE_IF);
1369
1370 /* Override the defaults for this instruction:
1371 */
1372 if (devinfo->gen < 6) {
1373 brw_set_dest(p, insn, brw_ip_reg());
1374 brw_set_src0(p, insn, brw_ip_reg());
1375 brw_set_src1(p, insn, brw_imm_d(0x0));
1376 } else if (devinfo->gen == 6) {
1377 brw_set_dest(p, insn, brw_imm_w(0));
1378 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1379 brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1380 brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1381 } else if (devinfo->gen == 7) {
1382 brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1383 brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1384 brw_set_src1(p, insn, brw_imm_w(0));
1385 brw_inst_set_jip(devinfo, insn, 0);
1386 brw_inst_set_uip(devinfo, insn, 0);
1387 } else {
1388 brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
1389 brw_set_src0(p, insn, brw_imm_d(0));
1390 brw_inst_set_jip(devinfo, insn, 0);
1391 brw_inst_set_uip(devinfo, insn, 0);
1392 }
1393
1394 brw_inst_set_exec_size(devinfo, insn, execute_size);
1395 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1396 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
1397 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1398 if (!p->single_program_flow && devinfo->gen < 6)
1399 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1400
1401 push_if_stack(p, insn);
1402 p->if_depth_in_loop[p->loop_stack_depth]++;
1403 return insn;
1404 }
1405
1406 /* This function is only used for gen6-style IF instructions with an
1407 * embedded comparison (conditional modifier). It is not used on gen7.
1408 */
1409 brw_inst *
1410 gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
1411 struct brw_reg src0, struct brw_reg src1)
1412 {
1413 const struct brw_device_info *devinfo = p->devinfo;
1414 brw_inst *insn;
1415
1416 insn = next_insn(p, BRW_OPCODE_IF);
1417
1418 brw_set_dest(p, insn, brw_imm_w(0));
1419 brw_inst_set_exec_size(devinfo, insn,
1420 brw_inst_exec_size(devinfo, p->current));
1421 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1422 brw_set_src0(p, insn, src0);
1423 brw_set_src1(p, insn, src1);
1424
1425 assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
1426 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
1427 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1428
1429 push_if_stack(p, insn);
1430 return insn;
1431 }
1432
1433 /**
1434 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
1435 */
1436 static void
1437 convert_IF_ELSE_to_ADD(struct brw_codegen *p,
1438 brw_inst *if_inst, brw_inst *else_inst)
1439 {
1440 const struct brw_device_info *devinfo = p->devinfo;
1441
1442 /* The next instruction (where the ENDIF would be, if it existed) */
1443 brw_inst *next_inst = &p->store[p->nr_insn];
1444
1445 assert(p->single_program_flow);
1446 assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
1447 assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
1448 assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);
1449
1450 /* Convert IF to an ADD instruction that moves the instruction pointer
1451 * to the first instruction of the ELSE block. If there is no ELSE
1452 * block, point to where ENDIF would be. Reverse the predicate.
1453 *
1454 * There's no need to execute an ENDIF since we don't need to do any
1455 * stack operations, and if we're currently executing, we just want to
1456 * continue normally.
1457 */
1458 brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
1459 brw_inst_set_pred_inv(devinfo, if_inst, true);
1460
1461 if (else_inst != NULL) {
1462 /* Convert ELSE to an ADD instruction that points where the ENDIF
1463 * would be.
1464 */
1465 brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);
1466
1467 brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
1468 brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
1469 } else {
1470 brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
1471 }
1472 }
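/* Worked example (hypothetical indices): with the IF at slot 10, an ELSE at
 * slot 14 and the next instruction at slot 20, the IF's immediate becomes
 * (14 - 10 + 1) * 16 == 80 bytes and the ELSE's (20 - 14) * 16 == 96 bytes,
 * since each native instruction is 16 bytes.
 */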
1473
1474 /**
1475 * Patch IF and ELSE instructions with appropriate jump targets.
1476 */
1477 static void
1478 patch_IF_ELSE(struct brw_codegen *p,
1479 brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
1480 {
1481 const struct brw_device_info *devinfo = p->devinfo;
1482
1483 /* We shouldn't be patching IF and ELSE instructions in single program flow
1484 * mode when gen < 6, because in single program flow mode on those
1485 * platforms, we convert flow control instructions to conditional ADDs that
1486 * operate on IP (see brw_ENDIF).
1487 *
1488 * However, on Gen6, writing to IP doesn't work in single program flow mode
1489 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1490 * not be updated by non-flow control instructions."). And on later
1491 * platforms, there is no significant benefit to converting control flow
1492 * instructions to conditional ADDs. So we do patch IF and ELSE
1493 * instructions in single program flow mode on those platforms.
1494 */
1495 if (devinfo->gen < 6)
1496 assert(!p->single_program_flow);
1497
1498 assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
1499 assert(endif_inst != NULL);
1500 assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
1501
1502 unsigned br = brw_jump_scale(devinfo);
1503
1504 assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
1505 brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));
1506
1507 if (else_inst == NULL) {
1508 /* Patch IF -> ENDIF */
1509 if (devinfo->gen < 6) {
1510 /* Turn it into an IFF, which means no mask stack operations for
1511 * all-false and jumping past the ENDIF.
1512 */
1513 brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
1514 brw_inst_set_gen4_jump_count(devinfo, if_inst,
1515 br * (endif_inst - if_inst + 1));
1516 brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
1517 } else if (devinfo->gen == 6) {
1518 /* As of gen6, there is no IFF and IF must point to the ENDIF. */
1519 brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
1520 } else {
1521 brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
1522 brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
1523 }
1524 } else {
1525 brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));
1526
1527 /* Patch IF -> ELSE */
1528 if (devinfo->gen < 6) {
1529 brw_inst_set_gen4_jump_count(devinfo, if_inst,
1530 br * (else_inst - if_inst));
1531 brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
1532 } else if (devinfo->gen == 6) {
1533 brw_inst_set_gen6_jump_count(devinfo, if_inst,
1534 br * (else_inst - if_inst + 1));
1535 }
1536
1537 /* Patch ELSE -> ENDIF */
1538 if (devinfo->gen < 6) {
1539 /* BRW_OPCODE_ELSE pre-gen6 should point just past the
1540 * matching ENDIF.
1541 */
1542 brw_inst_set_gen4_jump_count(devinfo, else_inst,
1543 br * (endif_inst - else_inst + 1));
1544 brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
1545 } else if (devinfo->gen == 6) {
1546 /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
1547 brw_inst_set_gen6_jump_count(devinfo, else_inst,
1548 br * (endif_inst - else_inst));
1549 } else {
1550 /* The IF instruction's JIP should point just past the ELSE */
1551 brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
1552 /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
1553 brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
1554 brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
1555 if (devinfo->gen >= 8) {
1556 /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
1557 * should point to ENDIF.
1558 */
1559 brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
1560 }
1561 }
1562 }
1563 }
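/* Worked example (hypothetical layout): on Gen6 (br == 2), with the IF at
 * slot 10, ELSE at 20 and ENDIF at 30, the IF gets jump_count
 * 2 * (20 - 10 + 1) == 22 ("one past the ELSE") and the ELSE gets
 * 2 * (30 - 20) == 20 ("points at the ENDIF"), matching the rules above.
 */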
1564
1565 void
1566 brw_ELSE(struct brw_codegen *p)
1567 {
1568 const struct brw_device_info *devinfo = p->devinfo;
1569 brw_inst *insn;
1570
1571 insn = next_insn(p, BRW_OPCODE_ELSE);
1572
1573 if (devinfo->gen < 6) {
1574 brw_set_dest(p, insn, brw_ip_reg());
1575 brw_set_src0(p, insn, brw_ip_reg());
1576 brw_set_src1(p, insn, brw_imm_d(0x0));
1577 } else if (devinfo->gen == 6) {
1578 brw_set_dest(p, insn, brw_imm_w(0));
1579 brw_inst_set_gen6_jump_count(devinfo, insn, 0);
1580 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1581 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1582 } else if (devinfo->gen == 7) {
1583 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1584 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1585 brw_set_src1(p, insn, brw_imm_w(0));
1586 brw_inst_set_jip(devinfo, insn, 0);
1587 brw_inst_set_uip(devinfo, insn, 0);
1588 } else {
1589 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1590 brw_set_src0(p, insn, brw_imm_d(0));
1591 brw_inst_set_jip(devinfo, insn, 0);
1592 brw_inst_set_uip(devinfo, insn, 0);
1593 }
1594
1595 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1596 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1597 if (!p->single_program_flow && devinfo->gen < 6)
1598 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1599
1600 push_if_stack(p, insn);
1601 }
1602
1603 void
1604 brw_ENDIF(struct brw_codegen *p)
1605 {
1606 const struct brw_device_info *devinfo = p->devinfo;
1607 brw_inst *insn = NULL;
1608 brw_inst *else_inst = NULL;
1609 brw_inst *if_inst = NULL;
1610 brw_inst *tmp;
1611 bool emit_endif = true;
1612
1613 /* In single program flow mode, we can express IF and ELSE instructions
1614 * equivalently as ADD instructions that operate on IP. On platforms prior
1615 * to Gen6, flow control instructions cause an implied thread switch, so
1616 * this is a significant savings.
1617 *
1618 * However, on Gen6, writing to IP doesn't work in single program flow mode
1619 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1620 * not be updated by non-flow control instructions."). And on later
1621 * platforms, there is no significant benefit to converting control flow
1622 * instructions to conditional ADDs. So we only do this trick on Gen4 and
1623 * Gen5.
1624 */
1625 if (devinfo->gen < 6 && p->single_program_flow)
1626 emit_endif = false;
1627
1628    /*
1629     * A single next_insn() may change the base address of the instruction
1630     * store (p->store), so call it first, before deriving any instruction
1631     * pointers from indices into the store.
1632     */
1633 if (emit_endif)
1634 insn = next_insn(p, BRW_OPCODE_ENDIF);
1635
1636 /* Pop the IF and (optional) ELSE instructions from the stack */
1637 p->if_depth_in_loop[p->loop_stack_depth]--;
1638 tmp = pop_if_stack(p);
1639 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1640 else_inst = tmp;
1641 tmp = pop_if_stack(p);
1642 }
1643 if_inst = tmp;
1644
1645 if (!emit_endif) {
1646 /* ENDIF is useless; don't bother emitting it. */
1647 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1648 return;
1649 }
1650
1651 if (devinfo->gen < 6) {
1652 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1653 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1654 brw_set_src1(p, insn, brw_imm_d(0x0));
1655 } else if (devinfo->gen == 6) {
1656 brw_set_dest(p, insn, brw_imm_w(0));
1657 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1658 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1659 } else if (devinfo->gen == 7) {
1660 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1661 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1662 brw_set_src1(p, insn, brw_imm_w(0));
1663 } else {
1664 brw_set_src0(p, insn, brw_imm_d(0));
1665 }
1666
1667 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1668 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1669 if (devinfo->gen < 6)
1670 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1671
1672    /* Also pop the item off the stack as part of the ENDIF instruction: */
1673 if (devinfo->gen < 6) {
1674 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1675 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1676 } else if (devinfo->gen == 6) {
1677 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1678 } else {
1679 brw_inst_set_jip(devinfo, insn, 2);
1680 }
1681 patch_IF_ELSE(p, if_inst, else_inst, insn);
1682 }
1683
1684 brw_inst *
1685 brw_BREAK(struct brw_codegen *p)
1686 {
1687 const struct brw_device_info *devinfo = p->devinfo;
1688 brw_inst *insn;
1689
1690 insn = next_insn(p, BRW_OPCODE_BREAK);
1691 if (devinfo->gen >= 8) {
1692 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1693 brw_set_src0(p, insn, brw_imm_d(0x0));
1694 } else if (devinfo->gen >= 6) {
1695 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1696 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1697 brw_set_src1(p, insn, brw_imm_d(0x0));
1698 } else {
1699 brw_set_dest(p, insn, brw_ip_reg());
1700 brw_set_src0(p, insn, brw_ip_reg());
1701 brw_set_src1(p, insn, brw_imm_d(0x0));
1702 brw_inst_set_gen4_pop_count(devinfo, insn,
1703 p->if_depth_in_loop[p->loop_stack_depth]);
1704 }
1705 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1706 brw_inst_set_exec_size(devinfo, insn,
1707 brw_inst_exec_size(devinfo, p->current));
1708
1709 return insn;
1710 }
1711
1712 brw_inst *
1713 brw_CONT(struct brw_codegen *p)
1714 {
1715 const struct brw_device_info *devinfo = p->devinfo;
1716 brw_inst *insn;
1717
1718 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1719 brw_set_dest(p, insn, brw_ip_reg());
1720 if (devinfo->gen >= 8) {
1721 brw_set_src0(p, insn, brw_imm_d(0x0));
1722 } else {
1723 brw_set_src0(p, insn, brw_ip_reg());
1724 brw_set_src1(p, insn, brw_imm_d(0x0));
1725 }
1726
1727 if (devinfo->gen < 6) {
1728 brw_inst_set_gen4_pop_count(devinfo, insn,
1729 p->if_depth_in_loop[p->loop_stack_depth]);
1730 }
1731 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1732 brw_inst_set_exec_size(devinfo, insn,
1733 brw_inst_exec_size(devinfo, p->current));
1734 return insn;
1735 }
1736
1737 brw_inst *
1738 gen6_HALT(struct brw_codegen *p)
1739 {
1740 const struct brw_device_info *devinfo = p->devinfo;
1741 brw_inst *insn;
1742
1743 insn = next_insn(p, BRW_OPCODE_HALT);
1744 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1745 if (devinfo->gen >= 8) {
1746 brw_set_src0(p, insn, brw_imm_d(0x0));
1747 } else {
1748 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1749 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1750 }
1751
1752 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1753 brw_inst_set_exec_size(devinfo, insn,
1754 brw_inst_exec_size(devinfo, p->current));
1755 return insn;
1756 }
1757
1758 /* DO/WHILE loop:
1759 *
1760 * The DO/WHILE is just an unterminated loop -- break or continue are
1761  * used for control within the loop. There are a few ways to implement
1762  * it:
1763 *
1764 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1765 * jip and no DO instruction.
1766 *
1767 * For non-uniform control flow pre-gen6, there's a DO instruction to
1768 * push the mask, and a WHILE to jump back, and BREAK to get out and
1769 * pop the mask.
1770 *
1771 * For gen6, there's no more mask stack, so no need for DO. WHILE
1772 * just points back to the first instruction of the loop.
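 *
 * An illustrative emission sequence, using the helpers in this file:
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *       ...loop body...
 *       brw_BREAK(p);     (jump targets patched later)
 *       brw_CONT(p);
 *    brw_WHILE(p);
 *
 * brw_WHILE() patches the BREAK/CONT jumps on pre-gen6; on gen6+ they are
 * fixed up after code generation by brw_set_uip_jip().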
1773 */
1774 brw_inst *
1775 brw_DO(struct brw_codegen *p, unsigned execute_size)
1776 {
1777 const struct brw_device_info *devinfo = p->devinfo;
1778
1779 if (devinfo->gen >= 6 || p->single_program_flow) {
1780 push_loop_stack(p, &p->store[p->nr_insn]);
1781 return &p->store[p->nr_insn];
1782 } else {
1783 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1784
1785 push_loop_stack(p, insn);
1786
1787 /* Override the defaults for this instruction:
1788 */
1789 brw_set_dest(p, insn, brw_null_reg());
1790 brw_set_src0(p, insn, brw_null_reg());
1791 brw_set_src1(p, insn, brw_null_reg());
1792
1793 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1794 brw_inst_set_exec_size(devinfo, insn, execute_size);
1795 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1796
1797 return insn;
1798 }
1799 }
1800
1801 /**
1802 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1803 * instruction here.
1804 *
1805 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1806 * nesting, since it can always just point to the end of the block/current loop.
1807 */
1808 static void
1809 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1810 {
1811 const struct brw_device_info *devinfo = p->devinfo;
1812 brw_inst *do_inst = get_inner_do_insn(p);
1813 brw_inst *inst;
1814 unsigned br = brw_jump_scale(devinfo);
1815
1816 assert(devinfo->gen < 6);
1817
1818 for (inst = while_inst - 1; inst != do_inst; inst--) {
1819       /* If the jump count is != 0, this instruction has already been
1820        * patched because it belongs to a loop nested inside the one we're
1821        * patching.
1822 */
1823 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1824 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1825          brw_inst_set_gen4_jump_count(devinfo, inst, br * ((while_inst - inst) + 1));
1826 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1827 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1828 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1829 }
1830 }
1831 }
1832
1833 brw_inst *
1834 brw_WHILE(struct brw_codegen *p)
1835 {
1836 const struct brw_device_info *devinfo = p->devinfo;
1837 brw_inst *insn, *do_insn;
1838 unsigned br = brw_jump_scale(devinfo);
1839
1840 if (devinfo->gen >= 6) {
1841 insn = next_insn(p, BRW_OPCODE_WHILE);
1842 do_insn = get_inner_do_insn(p);
1843
1844 if (devinfo->gen >= 8) {
1845 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1846 brw_set_src0(p, insn, brw_imm_d(0));
1847 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1848 } else if (devinfo->gen == 7) {
1849 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1850 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1851 brw_set_src1(p, insn, brw_imm_w(0));
1852 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1853 } else {
1854 brw_set_dest(p, insn, brw_imm_w(0));
1855 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1856 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1857 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1858 }
1859
1860 brw_inst_set_exec_size(devinfo, insn,
1861 brw_inst_exec_size(devinfo, p->current));
1862
1863 } else {
1864 if (p->single_program_flow) {
1865 insn = next_insn(p, BRW_OPCODE_ADD);
1866 do_insn = get_inner_do_insn(p);
1867
1868 brw_set_dest(p, insn, brw_ip_reg());
1869 brw_set_src0(p, insn, brw_ip_reg());
1870 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1871 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1872 } else {
1873 insn = next_insn(p, BRW_OPCODE_WHILE);
1874 do_insn = get_inner_do_insn(p);
1875
1876 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1877
1878 brw_set_dest(p, insn, brw_ip_reg());
1879 brw_set_src0(p, insn, brw_ip_reg());
1880 brw_set_src1(p, insn, brw_imm_d(0));
1881
1882 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1883 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1884 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1885
1886 brw_patch_break_cont(p, insn);
1887 }
1888 }
1889 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1890
1891 p->loop_stack_depth--;
1892
1893 return insn;
1894 }
1895
1896 /* FORWARD JUMPS:
1897 */
1898 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1899 {
1900 const struct brw_device_info *devinfo = p->devinfo;
1901 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1902 unsigned jmpi = 1;
1903
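   /* JMPI counts its jump distance in whole 128-bit instructions on gen4 and
    * in 64-bit chunks (two per instruction) on gen5 and later.
    */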
1904 if (devinfo->gen >= 5)
1905 jmpi = 2;
1906
1907 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1908 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1909
1910 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1911 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1912 }
1913
1914 /* To integrate with the above, it makes sense that the comparison
1915 * instruction should populate the flag register. It might be simpler
1916 * just to use the flag reg for most WM tasks?
1917 */
1918 void brw_CMP(struct brw_codegen *p,
1919 struct brw_reg dest,
1920 unsigned conditional,
1921 struct brw_reg src0,
1922 struct brw_reg src1)
1923 {
1924 const struct brw_device_info *devinfo = p->devinfo;
1925 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1926
1927 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1928 brw_set_dest(p, insn, dest);
1929 brw_set_src0(p, insn, src0);
1930 brw_set_src1(p, insn, src1);
1931
1932 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1933 * page says:
1934 * "Any CMP instruction with a null destination must use a {switch}."
1935 *
1936 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1937 * mentioned on their work-arounds pages.
1938 */
1939 if (devinfo->gen == 7) {
1940 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1941 dest.nr == BRW_ARF_NULL) {
1942 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1943 }
1944 }
1945 }
1946
1947 /***********************************************************************
1948 * Helpers for the various SEND message types:
1949 */
1950
1951 /** Extended math function, float[8].
1952 */
1953 void gen4_math(struct brw_codegen *p,
1954 struct brw_reg dest,
1955 unsigned function,
1956 unsigned msg_reg_nr,
1957 struct brw_reg src,
1958 unsigned precision )
1959 {
1960 const struct brw_device_info *devinfo = p->devinfo;
1961 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1962 unsigned data_type;
1963 if (has_scalar_region(src)) {
1964 data_type = BRW_MATH_DATA_SCALAR;
1965 } else {
1966 data_type = BRW_MATH_DATA_VECTOR;
1967 }
1968
1969 assert(devinfo->gen < 6);
1970
1971 /* Example code doesn't set predicate_control for send
1972 * instructions.
1973 */
1974 brw_inst_set_pred_control(devinfo, insn, 0);
1975 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1976
1977 brw_set_dest(p, insn, dest);
1978 brw_set_src0(p, insn, src);
1979 brw_set_math_message(p,
1980 insn,
1981 function,
1982 src.type == BRW_REGISTER_TYPE_D,
1983 precision,
1984 data_type);
1985 }
1986
1987 void gen6_math(struct brw_codegen *p,
1988 struct brw_reg dest,
1989 unsigned function,
1990 struct brw_reg src0,
1991 struct brw_reg src1)
1992 {
1993 const struct brw_device_info *devinfo = p->devinfo;
1994 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1995
1996 assert(devinfo->gen >= 6);
1997
1998 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1999 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
2000 assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
2001 (devinfo->gen >= 8 && src0.file == BRW_IMMEDIATE_VALUE));
2002
2003 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
2004 if (devinfo->gen == 6) {
2005 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
2006 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
2007 }
2008
2009 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
2010 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
2011 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
2012 assert(src0.type != BRW_REGISTER_TYPE_F);
2013 assert(src1.type != BRW_REGISTER_TYPE_F);
2014 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
2015 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
2016 } else {
2017 assert(src0.type == BRW_REGISTER_TYPE_F);
2018 assert(src1.type == BRW_REGISTER_TYPE_F);
2019 if (function == BRW_MATH_FUNCTION_POW) {
2020 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
2021 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
2022 } else {
2023 assert(src1.file == BRW_ARCHITECTURE_REGISTER_FILE &&
2024 src1.nr == BRW_ARF_NULL);
2025 }
2026 }
2027
2028 /* Source modifiers are ignored for extended math instructions on Gen6. */
2029 if (devinfo->gen == 6) {
2030 assert(!src0.negate);
2031 assert(!src0.abs);
2032 assert(!src1.negate);
2033 assert(!src1.abs);
2034 }
2035
2036 brw_inst_set_math_function(devinfo, insn, function);
2037
2038 brw_set_dest(p, insn, dest);
2039 brw_set_src0(p, insn, src0);
2040 brw_set_src1(p, insn, src1);
2041 }
2042
2043 /**
2044 * Return the right surface index to access the thread scratch space using
2045 * stateless dataport messages.
2046 */
2047 unsigned
2048 brw_scratch_surface_idx(const struct brw_codegen *p)
2049 {
2050 /* The scratch space is thread-local so IA coherency is unnecessary. */
2051 if (p->devinfo->gen >= 8)
2052 return GEN8_BTI_STATELESS_NON_COHERENT;
2053 else
2054 return BRW_BTI_STATELESS;
2055 }
2056
2057 /**
2058  * Write a block of OWORDs (half a GRF each) to the scratch buffer,
2059 * using a constant offset per channel.
2060 *
2061 * The offset must be aligned to oword size (16 bytes). Used for
2062 * register spilling.
2063 */
2064 void brw_oword_block_write_scratch(struct brw_codegen *p,
2065 struct brw_reg mrf,
2066 int num_regs,
2067 unsigned offset)
2068 {
2069 const struct brw_device_info *devinfo = p->devinfo;
2070 uint32_t msg_type;
2071
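   /* From gen6 on, the dataport takes the block offset in owords (16 bytes)
    * rather than bytes.
    */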
2072 if (devinfo->gen >= 6)
2073 offset /= 16;
2074
2075 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2076
2077 const unsigned mlen = 1 + num_regs;
2078 const unsigned msg_control =
2079 (num_regs == 1 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS :
2080 num_regs == 2 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS :
2081 num_regs == 4 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS : 0);
2082 assert(msg_control);
2083
2084 /* Set up the message header. This is g0, with g0.2 filled with
2085 * the offset. We don't want to leave our offset around in g0 or
2086 * it'll screw up texture samples, so set it up inside the message
2087 * reg.
2088 */
2089 {
2090 brw_push_insn_state(p);
2091 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2092 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2093 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2094
2095 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2096
2097 /* set message header global offset field (reg 0, element 2) */
2098 brw_MOV(p,
2099 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2100 mrf.nr,
2101 2), BRW_REGISTER_TYPE_UD),
2102 brw_imm_ud(offset));
2103
2104 brw_pop_insn_state(p);
2105 }
2106
2107 {
2108 struct brw_reg dest;
2109 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2110 int send_commit_msg;
2111 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
2112 BRW_REGISTER_TYPE_UW);
2113
2114 brw_inst_set_compression(devinfo, insn, false);
2115
2116 if (brw_inst_exec_size(devinfo, insn) >= 16)
2117 src_header = vec16(src_header);
2118
2119 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2120 if (devinfo->gen < 6)
2121 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2122
2123 /* Until gen6, writes followed by reads from the same location
2124 * are not guaranteed to be ordered unless write_commit is set.
2125 * If set, then a no-op write is issued to the destination
2126 * register to set a dependency, and a read from the destination
2127 * can be used to ensure the ordering.
2128 *
2129 * For gen6, only writes between different threads need ordering
2130 * protection. Our use of DP writes is all about register
2131 * spilling within a thread.
2132 */
2133 if (devinfo->gen >= 6) {
2134 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2135 send_commit_msg = 0;
2136 } else {
2137 dest = src_header;
2138 send_commit_msg = 1;
2139 }
2140
2141 brw_set_dest(p, insn, dest);
2142 if (devinfo->gen >= 6) {
2143 brw_set_src0(p, insn, mrf);
2144 } else {
2145 brw_set_src0(p, insn, brw_null_reg());
2146 }
2147
2148 if (devinfo->gen >= 6)
2149 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2150 else
2151 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2152
2153 brw_set_dp_write_message(p,
2154 insn,
2155 brw_scratch_surface_idx(p),
2156 msg_control,
2157 msg_type,
2158 mlen,
2159 true, /* header_present */
2160 0, /* not a render target */
2161 send_commit_msg, /* response_length */
2162 0, /* eot */
2163 send_commit_msg);
2164 }
2165 }
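
/* Illustrative use (offset hypothetical): spilling two GRFs to byte offset
 * 64 of the scratch space through MRF 2 would be
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(2), 2, 64);
 *
 * which sends a three-register message: the header plus two data registers.
 */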
2166
2167
2168 /**
2169 * Read a block of owords (half a GRF each) from the scratch buffer
2170 * using a constant index per channel.
2171 *
2172 * Offset must be aligned to oword size (16 bytes). Used for register
2173 * spilling.
2174 */
2175 void
2176 brw_oword_block_read_scratch(struct brw_codegen *p,
2177 struct brw_reg dest,
2178 struct brw_reg mrf,
2179 int num_regs,
2180 unsigned offset)
2181 {
2182 const struct brw_device_info *devinfo = p->devinfo;
2183
2184 if (devinfo->gen >= 6)
2185 offset /= 16;
2186
2187    if (devinfo->gen >= 7) {
2188 /* On gen 7 and above, we no longer have message registers and we can
2189 * send from any register we want. By using the destination register
2190 * for the message, we guarantee that the implied message write won't
2191 * accidentally overwrite anything. This has been a problem because
2192 * the MRF registers and source for the final FB write are both fixed
2193 * and may overlap.
2194 */
2195 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2196 } else {
2197 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2198 }
2199 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2200
2201 const unsigned rlen = num_regs;
2202 const unsigned msg_control =
2203 (num_regs == 1 ? BRW_DATAPORT_OWORD_BLOCK_2_OWORDS :
2204 num_regs == 2 ? BRW_DATAPORT_OWORD_BLOCK_4_OWORDS :
2205 num_regs == 4 ? BRW_DATAPORT_OWORD_BLOCK_8_OWORDS : 0);
2206 assert(msg_control);
2207
2208 {
2209 brw_push_insn_state(p);
2210 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2211 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2212 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2213
2214 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2215
2216 /* set message header global offset field (reg 0, element 2) */
2217 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2218
2219 brw_pop_insn_state(p);
2220 }
2221
2222 {
2223 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2224
2225 assert(brw_inst_pred_control(devinfo, insn) == 0);
2226 brw_inst_set_compression(devinfo, insn, false);
2227
2228 brw_set_dest(p, insn, dest); /* UW? */
2229 if (devinfo->gen >= 6) {
2230 brw_set_src0(p, insn, mrf);
2231 } else {
2232 brw_set_src0(p, insn, brw_null_reg());
2233 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2234 }
2235
2236 brw_set_dp_read_message(p,
2237 insn,
2238 brw_scratch_surface_idx(p),
2239 msg_control,
2240 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
2241 BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
2242 1, /* msg_length */
2243 true, /* header_present */
2244 rlen);
2245 }
2246 }
2247
2248 void
2249 gen7_block_read_scratch(struct brw_codegen *p,
2250 struct brw_reg dest,
2251 int num_regs,
2252 unsigned offset)
2253 {
2254 const struct brw_device_info *devinfo = p->devinfo;
2255 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2256 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2257
2258 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2259
2260 /* The HW requires that the header is present; this is to get the g0.5
2261 * scratch offset.
2262 */
2263 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2264
2265 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2266 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2267 * is 32 bytes, which happens to be the size of a register.
2268 */
2269 offset /= REG_SIZE;
2270 assert(offset < (1 << 12));
2271
2272 gen7_set_dp_scratch_message(p, insn,
2273 false, /* scratch read */
2274 false, /* OWords */
2275 false, /* invalidate after read */
2276 num_regs,
2277 offset,
2278 1, /* mlen: just g0 */
2279 num_regs, /* rlen */
2280 true); /* header present */
2281 }
2282
2283 /**
2284 * Read a float[4] vector from the data port Data Cache (const buffer).
2285 * Location (in buffer) should be a multiple of 16.
2286 * Used for fetching shader constants.
2287 */
2288 void brw_oword_block_read(struct brw_codegen *p,
2289 struct brw_reg dest,
2290 struct brw_reg mrf,
2291 uint32_t offset,
2292 uint32_t bind_table_index)
2293 {
2294 const struct brw_device_info *devinfo = p->devinfo;
2295
2296 /* On newer hardware, offset is in units of owords. */
2297 if (devinfo->gen >= 6)
2298 offset /= 16;
2299
2300 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2301
2302 brw_push_insn_state(p);
2303 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2304 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2305 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2306 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2307
2308 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2309
2310 /* set message header global offset field (reg 0, element 2) */
2311 brw_MOV(p,
2312 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2313 mrf.nr,
2314 2), BRW_REGISTER_TYPE_UD),
2315 brw_imm_ud(offset));
2316
2317 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2318
2319 /* cast dest to a uword[8] vector */
2320 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2321
2322 brw_set_dest(p, insn, dest);
2323 if (devinfo->gen >= 6) {
2324 brw_set_src0(p, insn, mrf);
2325 } else {
2326 brw_set_src0(p, insn, brw_null_reg());
2327 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2328 }
2329
2330 brw_set_dp_read_message(p,
2331 insn,
2332 bind_table_index,
2333 BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW,
2334 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2335 BRW_DATAPORT_READ_TARGET_DATA_CACHE,
2336 1, /* msg_length */
2337 true, /* header_present */
2338 1); /* response_length (1 reg, 2 owords!) */
2339
2340 brw_pop_insn_state(p);
2341 }
2342
2343
2344 void brw_fb_WRITE(struct brw_codegen *p,
2345 struct brw_reg payload,
2346 struct brw_reg implied_header,
2347 unsigned msg_control,
2348 unsigned binding_table_index,
2349 unsigned msg_length,
2350 unsigned response_length,
2351 bool eot,
2352 bool last_render_target,
2353 bool header_present)
2354 {
2355 const struct brw_device_info *devinfo = p->devinfo;
2356 brw_inst *insn;
2357 unsigned msg_type;
2358 struct brw_reg dest, src0;
2359
2360 if (brw_inst_exec_size(devinfo, p->current) >= BRW_EXECUTE_16)
2361 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2362 else
2363 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2364
2365 if (devinfo->gen >= 6) {
2366 insn = next_insn(p, BRW_OPCODE_SENDC);
2367 } else {
2368 insn = next_insn(p, BRW_OPCODE_SEND);
2369 }
2370 brw_inst_set_compression(devinfo, insn, false);
2371
2372 if (devinfo->gen >= 6) {
2373 /* headerless version, just submit color payload */
2374 src0 = payload;
2375
2376 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2377 } else {
2378 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2379 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2380 src0 = implied_header;
2381
2382 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2383 }
2384
2385 brw_set_dest(p, insn, dest);
2386 brw_set_src0(p, insn, src0);
2387 brw_set_dp_write_message(p,
2388 insn,
2389 binding_table_index,
2390 msg_control,
2391 msg_type,
2392 msg_length,
2393 header_present,
2394 last_render_target,
2395 response_length,
2396 eot,
2397 0 /* send_commit_msg */);
2398 }
2399
2400
2401 /**
2402 * Texture sample instruction.
2403 * Note: the msg_type plus msg_length values determine exactly what kind
2404 * of sampling operation is performed. See volume 4, page 161 of docs.
2405 */
2406 void brw_SAMPLE(struct brw_codegen *p,
2407 struct brw_reg dest,
2408 unsigned msg_reg_nr,
2409 struct brw_reg src0,
2410 unsigned binding_table_index,
2411 unsigned sampler,
2412 unsigned msg_type,
2413 unsigned response_length,
2414 unsigned msg_length,
2415 unsigned header_present,
2416 unsigned simd_mode,
2417 unsigned return_format)
2418 {
2419 const struct brw_device_info *devinfo = p->devinfo;
2420 brw_inst *insn;
2421
2422 if (msg_reg_nr != -1)
2423 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2424
2425 insn = next_insn(p, BRW_OPCODE_SEND);
2426 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2427
2428 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2429 *
2430 * "Instruction compression is not allowed for this instruction (that
2431 * is, send). The hardware behavior is undefined if this instruction is
2432 * set as compressed. However, compress control can be set to "SecHalf"
2433 * to affect the EMask generation."
2434 *
2435 * No similar wording is found in later PRMs, but there are examples
2436 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2437 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2438 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2439 */
2440 brw_inst_set_compression(devinfo, insn, false);
2441
2442 if (devinfo->gen < 6)
2443 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2444
2445 brw_set_dest(p, insn, dest);
2446 brw_set_src0(p, insn, src0);
2447 brw_set_sampler_message(p, insn,
2448 binding_table_index,
2449 sampler,
2450 msg_type,
2451 response_length,
2452 msg_length,
2453 header_present,
2454 simd_mode,
2455 return_format);
2456 }
2457
2458 /* Adjust the message header's sampler state pointer to
2459 * select the correct group of 16 samplers.
2460 */
2461 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2462 struct brw_reg header,
2463 struct brw_reg sampler_index)
2464 {
2465 /* The "Sampler Index" field can only store values between 0 and 15.
2466 * However, we can add an offset to the "Sampler State Pointer"
2467 * field, effectively selecting a different set of 16 samplers.
2468 *
2469 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2470     * offset, and each sampler state is only 16 bytes, so we can't
2471     * exclusively use the offset -- we have to use both.
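    *
    * For example (illustrative): a sampler index of 18 makes the code below
    * add 16 * (18 / 16) * 16 = 256 bytes to the sampler state pointer,
    * selecting the second group of 16 samplers, while the low bits
    * (18 % 16 = 2) still fit in the "Sampler Index" field.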
2472 */
2473
2474 const struct brw_device_info *devinfo = p->devinfo;
2475
2476 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2477 const int sampler_state_size = 16; /* 16 bytes */
2478 uint32_t sampler = sampler_index.ud;
2479
2480 if (sampler >= 16) {
2481 assert(devinfo->is_haswell || devinfo->gen >= 8);
2482 brw_ADD(p,
2483 get_element_ud(header, 3),
2484 get_element_ud(brw_vec8_grf(0, 0), 3),
2485 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2486 }
2487 } else {
2488 /* Non-const sampler array indexing case */
2489 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2490 return;
2491 }
2492
2493 struct brw_reg temp = get_element_ud(header, 3);
2494
2495 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2496 brw_SHL(p, temp, temp, brw_imm_ud(4));
2497 brw_ADD(p,
2498 get_element_ud(header, 3),
2499 get_element_ud(brw_vec8_grf(0, 0), 3),
2500 temp);
2501 }
2502 }
2503
2504 /* All these variables are pretty confusing - we might be better off
2505 * using bitmasks and macros for this, in the old style. Or perhaps
2506 * just having the caller instantiate the fields in dword3 itself.
2507 */
2508 void brw_urb_WRITE(struct brw_codegen *p,
2509 struct brw_reg dest,
2510 unsigned msg_reg_nr,
2511 struct brw_reg src0,
2512 enum brw_urb_write_flags flags,
2513 unsigned msg_length,
2514 unsigned response_length,
2515 unsigned offset,
2516 unsigned swizzle)
2517 {
2518 const struct brw_device_info *devinfo = p->devinfo;
2519 brw_inst *insn;
2520
2521 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2522
2523 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2524 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2525 brw_push_insn_state(p);
2526 brw_set_default_access_mode(p, BRW_ALIGN_1);
2527 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2528 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2529 BRW_REGISTER_TYPE_UD),
2530 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2531 brw_imm_ud(0xff00));
2532 brw_pop_insn_state(p);
2533 }
2534
2535 insn = next_insn(p, BRW_OPCODE_SEND);
2536
2537 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2538
2539 brw_set_dest(p, insn, dest);
2540 brw_set_src0(p, insn, src0);
2541 brw_set_src1(p, insn, brw_imm_d(0));
2542
2543 if (devinfo->gen < 6)
2544 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2545
2546 brw_set_urb_message(p,
2547 insn,
2548 flags,
2549 msg_length,
2550 response_length,
2551 offset,
2552 swizzle);
2553 }
2554
2555 struct brw_inst *
2556 brw_send_indirect_message(struct brw_codegen *p,
2557 unsigned sfid,
2558 struct brw_reg dst,
2559 struct brw_reg payload,
2560 struct brw_reg desc)
2561 {
2562 const struct brw_device_info *devinfo = p->devinfo;
2563 struct brw_inst *send;
2564 int setup;
2565
2566 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2567
2568 assert(desc.type == BRW_REGISTER_TYPE_UD);
2569
2570 /* We hold on to the setup instruction (the SEND in the direct case, the OR
2571 * in the indirect case) by its index in the instruction store. The
2572 * pointer returned by next_insn() may become invalid if emitting the SEND
2573 * in the indirect case reallocs the store.
2574 */
2575
2576 if (desc.file == BRW_IMMEDIATE_VALUE) {
2577 setup = p->nr_insn;
2578 send = next_insn(p, BRW_OPCODE_SEND);
2579 brw_set_src1(p, send, desc);
2580
2581 } else {
2582 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2583
2584 brw_push_insn_state(p);
2585 brw_set_default_access_mode(p, BRW_ALIGN_1);
2586 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2587 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2588
2589 /* Load the indirect descriptor to an address register using OR so the
2590 * caller can specify additional descriptor bits with the usual
2591 * brw_set_*_message() helper functions.
2592 */
2593 setup = p->nr_insn;
2594 brw_OR(p, addr, desc, brw_imm_ud(0));
2595
2596 brw_pop_insn_state(p);
2597
2598 send = next_insn(p, BRW_OPCODE_SEND);
2599 brw_set_src1(p, send, addr);
2600 }
2601
2602 if (dst.width < BRW_EXECUTE_8)
2603 brw_inst_set_exec_size(devinfo, send, dst.width);
2604
2605 brw_set_dest(p, send, dst);
2606 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2607 brw_inst_set_sfid(devinfo, send, sfid);
2608
2609 return &p->store[setup];
2610 }
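
/* brw_pixel_interpolator_query() below shows a typical caller: it passes
 * the interpolation data as the descriptor, which may be either an
 * immediate or a register, and lets this function pick the direct or
 * indirect form accordingly.
 */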
2611
2612 static struct brw_inst *
2613 brw_send_indirect_surface_message(struct brw_codegen *p,
2614 unsigned sfid,
2615 struct brw_reg dst,
2616 struct brw_reg payload,
2617 struct brw_reg surface,
2618 unsigned message_len,
2619 unsigned response_len,
2620 bool header_present)
2621 {
2622 const struct brw_device_info *devinfo = p->devinfo;
2623 struct brw_inst *insn;
2624
2625 if (surface.file != BRW_IMMEDIATE_VALUE) {
2626 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2627
2628 brw_push_insn_state(p);
2629 brw_set_default_access_mode(p, BRW_ALIGN_1);
2630 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2631 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2632
2633 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2634 * some surface array is accessed out of bounds.
2635 */
2636 insn = brw_AND(p, addr,
2637 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2638 BRW_GET_SWZ(surface.swizzle, 0)),
2639 brw_imm_ud(0xff));
2640
2641 brw_pop_insn_state(p);
2642
2643 surface = addr;
2644 }
2645
2646 insn = brw_send_indirect_message(p, sfid, dst, payload, surface);
2647 brw_inst_set_mlen(devinfo, insn, message_len);
2648 brw_inst_set_rlen(devinfo, insn, response_len);
2649 brw_inst_set_header_present(devinfo, insn, header_present);
2650
2651 return insn;
2652 }
2653
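/* Determine whether the given WHILE jumps back to (or before) start_offset,
 * i.e. whether it terminates a loop containing start_offset rather than a
 * sibling loop that begins and ends after it.
 */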
2654 static bool
2655 while_jumps_before_offset(const struct brw_device_info *devinfo,
2656 brw_inst *insn, int while_offset, int start_offset)
2657 {
2658 int scale = 16 / brw_jump_scale(devinfo);
2659 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2660 : brw_inst_jip(devinfo, insn);
2661 return while_offset + jip * scale <= start_offset;
2662 }
2663
2664
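/* Scan forward from start_offset for the instruction that ends the current
 * block: an ELSE, ENDIF or HALT at the same nesting depth, or the WHILE of
 * the enclosing loop. Returns 0 if nothing is found before the end of the
 * program.
 */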
2665 static int
2666 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2667 {
2668 int offset;
2669 void *store = p->store;
2670 const struct brw_device_info *devinfo = p->devinfo;
2671
2672 int depth = 0;
2673
2674 for (offset = next_offset(devinfo, store, start_offset);
2675 offset < p->next_insn_offset;
2676 offset = next_offset(devinfo, store, offset)) {
2677 brw_inst *insn = store + offset;
2678
2679 switch (brw_inst_opcode(devinfo, insn)) {
2680 case BRW_OPCODE_IF:
2681 depth++;
2682 break;
2683 case BRW_OPCODE_ENDIF:
2684 if (depth == 0)
2685 return offset;
2686 depth--;
2687 break;
2688 case BRW_OPCODE_WHILE:
2689 /* If the while doesn't jump before our instruction, it's the end
2690 * of a sibling do...while loop. Ignore it.
2691 */
2692 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2693 continue;
2694 /* fallthrough */
2695 case BRW_OPCODE_ELSE:
2696 case BRW_OPCODE_HALT:
2697 if (depth == 0)
2698 return offset;
2699 }
2700 }
2701
2702 return 0;
2703 }
2704
2705 /* There is no DO instruction on gen6 and later, so to find the end of the
2706  * loop we have to check whether the WHILE jumps back before our start
2707  * instruction.
2708 */
2709 static int
2710 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2711 {
2712 const struct brw_device_info *devinfo = p->devinfo;
2713 int offset;
2714 void *store = p->store;
2715
2716 assert(devinfo->gen >= 6);
2717
2718 /* Always start after the instruction (such as a WHILE) we're trying to fix
2719 * up.
2720 */
2721 for (offset = next_offset(devinfo, store, start_offset);
2722 offset < p->next_insn_offset;
2723 offset = next_offset(devinfo, store, offset)) {
2724 brw_inst *insn = store + offset;
2725
2726 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2727 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2728 return offset;
2729 }
2730 }
2731 assert(!"not reached");
2732 return start_offset;
2733 }
2734
2735 /* After program generation, go back and update the UIP and JIP of
2736  * BREAK, CONT, ENDIF, and HALT instructions to their correct locations.
2737 */
2738 void
2739 brw_set_uip_jip(struct brw_codegen *p)
2740 {
2741 const struct brw_device_info *devinfo = p->devinfo;
2742 int offset;
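   /* "br" is the number of jump units per 128-bit instruction; "scale" turns
    * byte offsets within the store into jump units: 8-byte chunks on gen6/7,
    * bytes on gen8+.
    */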
2743 int br = brw_jump_scale(devinfo);
2744 int scale = 16 / br;
2745 void *store = p->store;
2746
2747 if (devinfo->gen < 6)
2748 return;
2749
2750 for (offset = 0; offset < p->next_insn_offset;
2751 offset = next_offset(devinfo, store, offset)) {
2752 brw_inst *insn = store + offset;
2753
2754 if (brw_inst_cmpt_control(devinfo, insn)) {
2755 /* Fixups for compacted BREAK/CONTINUE not supported yet. */
2756 assert(brw_inst_opcode(devinfo, insn) != BRW_OPCODE_BREAK &&
2757 brw_inst_opcode(devinfo, insn) != BRW_OPCODE_CONTINUE &&
2758 brw_inst_opcode(devinfo, insn) != BRW_OPCODE_HALT);
2759 continue;
2760 }
2761
2762 int block_end_offset = brw_find_next_block_end(p, offset);
2763 switch (brw_inst_opcode(devinfo, insn)) {
2764 case BRW_OPCODE_BREAK:
2765 assert(block_end_offset != 0);
2766 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2767 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2768 brw_inst_set_uip(devinfo, insn,
2769 (brw_find_loop_end(p, offset) - offset +
2770 (devinfo->gen == 6 ? 16 : 0)) / scale);
2771 break;
2772 case BRW_OPCODE_CONTINUE:
2773 assert(block_end_offset != 0);
2774 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2775 brw_inst_set_uip(devinfo, insn,
2776 (brw_find_loop_end(p, offset) - offset) / scale);
2777
2778 assert(brw_inst_uip(devinfo, insn) != 0);
2779 assert(brw_inst_jip(devinfo, insn) != 0);
2780 break;
2781
2782 case BRW_OPCODE_ENDIF: {
2783 int32_t jump = (block_end_offset == 0) ?
2784 1 * br : (block_end_offset - offset) / scale;
2785 if (devinfo->gen >= 7)
2786 brw_inst_set_jip(devinfo, insn, jump);
2787 else
2788 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2789 break;
2790 }
2791
2792 case BRW_OPCODE_HALT:
2793 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2794 *
2795 * "In case of the halt instruction not inside any conditional
2796 * code block, the value of <JIP> and <UIP> should be the
2797 * same. In case of the halt instruction inside conditional code
2798 * block, the <UIP> should be the end of the program, and the
2799 * <JIP> should be end of the most inner conditional code block."
2800 *
2801 * The uip will have already been set by whoever set up the
2802 * instruction.
2803 */
2804 if (block_end_offset == 0) {
2805 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2806 } else {
2807 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2808 }
2809 assert(brw_inst_uip(devinfo, insn) != 0);
2810 assert(brw_inst_jip(devinfo, insn) != 0);
2811 break;
2812 }
2813 }
2814 }
2815
2816 void brw_ff_sync(struct brw_codegen *p,
2817 struct brw_reg dest,
2818 unsigned msg_reg_nr,
2819 struct brw_reg src0,
2820 bool allocate,
2821 unsigned response_length,
2822 bool eot)
2823 {
2824 const struct brw_device_info *devinfo = p->devinfo;
2825 brw_inst *insn;
2826
2827 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2828
2829 insn = next_insn(p, BRW_OPCODE_SEND);
2830 brw_set_dest(p, insn, dest);
2831 brw_set_src0(p, insn, src0);
2832 brw_set_src1(p, insn, brw_imm_d(0));
2833
2834 if (devinfo->gen < 6)
2835 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2836
2837 brw_set_ff_sync_message(p,
2838 insn,
2839 allocate,
2840 response_length,
2841 eot);
2842 }
2843
2844 /**
2845 * Emit the SEND instruction necessary to generate stream output data on Gen6
2846 * (for transform feedback).
2847 *
2848 * If send_commit_msg is true, this is the last piece of stream output data
2849 * from this thread, so send the data as a committed write. According to the
2850 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2851 *
2852 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2853 * writes are complete by sending the final write as a committed write."
2854 */
2855 void
2856 brw_svb_write(struct brw_codegen *p,
2857 struct brw_reg dest,
2858 unsigned msg_reg_nr,
2859 struct brw_reg src0,
2860 unsigned binding_table_index,
2861 bool send_commit_msg)
2862 {
2863 brw_inst *insn;
2864
2865 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2866
2867 insn = next_insn(p, BRW_OPCODE_SEND);
2868 brw_set_dest(p, insn, dest);
2869 brw_set_src0(p, insn, src0);
2870 brw_set_src1(p, insn, brw_imm_d(0));
2871 brw_set_dp_write_message(p, insn,
2872 binding_table_index,
2873 0, /* msg_control: ignored */
2874 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2875 1, /* msg_length */
2876 true, /* header_present */
2877 0, /* last_render_target: ignored */
2878 send_commit_msg, /* response_length */
2879 0, /* end_of_thread */
2880 send_commit_msg); /* send_commit_msg */
2881 }
2882
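/* Return the number of response registers a surface read or atomic message
 * produces: a single register in SIMD4x2 (Align16) mode, two registers per
 * channel in SIMD16, and otherwise one register per channel (SIMD8).
 */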
2883 static unsigned
2884 brw_surface_payload_size(struct brw_codegen *p,
2885 unsigned num_channels,
2886 bool has_simd4x2,
2887 bool has_simd16)
2888 {
2889 if (has_simd4x2 &&
2890 brw_inst_access_mode(p->devinfo, p->current) == BRW_ALIGN_16)
2891 return 1;
2892 else if (has_simd16 &&
2893 brw_inst_exec_size(p->devinfo, p->current) == BRW_EXECUTE_16)
2894 return 2 * num_channels;
2895 else
2896 return num_channels;
2897 }
2898
2899 static void
2900 brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
2901 brw_inst *insn,
2902 unsigned atomic_op,
2903 bool response_expected)
2904 {
2905 const struct brw_device_info *devinfo = p->devinfo;
2906 unsigned msg_control =
2907 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2908 (response_expected ? 1 << 5 : 0); /* Return data expected */
2909
2910 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2911 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2912 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2913 msg_control |= 1 << 4; /* SIMD8 mode */
2914
2915 brw_inst_set_dp_msg_type(devinfo, insn,
2916 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2917 } else {
2918 brw_inst_set_dp_msg_type(devinfo, insn,
2919 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2920 }
2921 } else {
2922 brw_inst_set_dp_msg_type(devinfo, insn,
2923 GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2924
2925 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2926 msg_control |= 1 << 4; /* SIMD8 mode */
2927 }
2928
2929 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2930 }
2931
2932 void
2933 brw_untyped_atomic(struct brw_codegen *p,
2934 struct brw_reg dst,
2935 struct brw_reg payload,
2936 struct brw_reg surface,
2937 unsigned atomic_op,
2938 unsigned msg_length,
2939 bool response_expected)
2940 {
2941 const struct brw_device_info *devinfo = p->devinfo;
2942 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2943 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2944 GEN7_SFID_DATAPORT_DATA_CACHE);
2945 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
2946 /* Mask out unused components -- This is especially important in Align16
2947 * mode on generations that don't have native support for SIMD4x2 atomics,
2948 * because unused but enabled components will cause the dataport to perform
2949 * additional atomic operations on the addresses that happen to be in the
2950 * uninitialized Y, Z and W coordinates of the payload.
2951 */
2952 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2953 struct brw_inst *insn = brw_send_indirect_surface_message(
2954 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
2955 brw_surface_payload_size(p, response_expected,
2956 devinfo->gen >= 8 || devinfo->is_haswell, true),
2957 align1);
2958
2959 brw_set_dp_untyped_atomic_message(
2960 p, insn, atomic_op, response_expected);
2961 }
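
/* Illustrative use (register choices hypothetical): an untyped atomic
 * increment on surface 1 whose return value is discarded might be emitted as
 *
 *    brw_untyped_atomic(p, brw_null_reg(), payload, brw_imm_ud(1),
 *                       BRW_AOP_INC, 2, false);
 *
 * where payload holds the per-channel offsets and 2 is the message length.
 */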
2962
2963 static void
2964 brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
2965 struct brw_inst *insn,
2966 unsigned num_channels)
2967 {
2968 const struct brw_device_info *devinfo = p->devinfo;
2969 /* Set mask of 32-bit channels to drop. */
2970 unsigned msg_control = 0xf & (0xf << num_channels);
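   /* For example, num_channels == 2 yields 0xc: X and Y are returned while Z
    * and W are dropped.
    */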
2971
2972 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2973 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
2974 msg_control |= 1 << 4; /* SIMD16 mode */
2975 else
2976 msg_control |= 2 << 4; /* SIMD8 mode */
2977 }
2978
2979 brw_inst_set_dp_msg_type(devinfo, insn,
2980 (devinfo->gen >= 8 || devinfo->is_haswell ?
2981 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
2982 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
2983 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2984 }
2985
2986 void
2987 brw_untyped_surface_read(struct brw_codegen *p,
2988 struct brw_reg dst,
2989 struct brw_reg payload,
2990 struct brw_reg surface,
2991 unsigned msg_length,
2992 unsigned num_channels)
2993 {
2994 const struct brw_device_info *devinfo = p->devinfo;
2995 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2996 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2997 GEN7_SFID_DATAPORT_DATA_CACHE);
2998 struct brw_inst *insn = brw_send_indirect_surface_message(
2999 p, sfid, dst, payload, surface, msg_length,
3000 brw_surface_payload_size(p, num_channels, true, true),
3001 false);
3002
3003 brw_set_dp_untyped_surface_read_message(
3004 p, insn, num_channels);
3005 }
3006
3007 static void
3008 brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
3009 struct brw_inst *insn,
3010 unsigned num_channels)
3011 {
3012 const struct brw_device_info *devinfo = p->devinfo;
3013 /* Set mask of 32-bit channels to drop. */
3014 unsigned msg_control = 0xf & (0xf << num_channels);
3015
3016 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3017 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
3018 msg_control |= 1 << 4; /* SIMD16 mode */
3019 else
3020 msg_control |= 2 << 4; /* SIMD8 mode */
3021 } else {
3022 if (devinfo->gen >= 8 || devinfo->is_haswell)
3023 msg_control |= 0 << 4; /* SIMD4x2 mode */
3024 else
3025 msg_control |= 2 << 4; /* SIMD8 mode */
3026 }
3027
3028 brw_inst_set_dp_msg_type(devinfo, insn,
3029 devinfo->gen >= 8 || devinfo->is_haswell ?
3030 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
3031 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
3032 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3033 }
3034
3035 void
3036 brw_untyped_surface_write(struct brw_codegen *p,
3037 struct brw_reg payload,
3038 struct brw_reg surface,
3039 unsigned msg_length,
3040 unsigned num_channels)
3041 {
3042 const struct brw_device_info *devinfo = p->devinfo;
3043 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3044 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3045 GEN7_SFID_DATAPORT_DATA_CACHE);
3046 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
3047 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3048 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3049 WRITEMASK_X : WRITEMASK_XYZW;
3050 struct brw_inst *insn = brw_send_indirect_surface_message(
3051 p, sfid, brw_writemask(brw_null_reg(), mask),
3052 payload, surface, msg_length, 0, align1);
3053
3054 brw_set_dp_untyped_surface_write_message(
3055 p, insn, num_channels);
3056 }
3057
3058 static void
3059 brw_set_dp_typed_atomic_message(struct brw_codegen *p,
3060 struct brw_inst *insn,
3061 unsigned atomic_op,
3062 bool response_expected)
3063 {
3064 const struct brw_device_info *devinfo = p->devinfo;
3065 unsigned msg_control =
3066 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3067 (response_expected ? 1 << 5 : 0); /* Return data expected */
3068
3069 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3070 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3071 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3072 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3073
3074 brw_inst_set_dp_msg_type(devinfo, insn,
3075 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
3076 } else {
3077 brw_inst_set_dp_msg_type(devinfo, insn,
3078 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
3079 }
3080
3081 } else {
3082 brw_inst_set_dp_msg_type(devinfo, insn,
3083 GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);
3084
3085 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3086 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3087 }
3088
3089 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3090 }
3091
3092 void
3093 brw_typed_atomic(struct brw_codegen *p,
3094 struct brw_reg dst,
3095 struct brw_reg payload,
3096 struct brw_reg surface,
3097 unsigned atomic_op,
3098 unsigned msg_length,
3099 bool response_expected) {
3100 const struct brw_device_info *devinfo = p->devinfo;
3101 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3102 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3103 GEN6_SFID_DATAPORT_RENDER_CACHE);
3104 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3105 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3106 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3107 struct brw_inst *insn = brw_send_indirect_surface_message(
3108 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
3109 brw_surface_payload_size(p, response_expected,
3110 devinfo->gen >= 8 || devinfo->is_haswell, false),
3111 true);
3112
3113 brw_set_dp_typed_atomic_message(
3114 p, insn, atomic_op, response_expected);
3115 }
3116
3117 static void
3118 brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
3119 struct brw_inst *insn,
3120 unsigned num_channels)
3121 {
3122 const struct brw_device_info *devinfo = p->devinfo;
3123 /* Set mask of unused channels. */
3124 unsigned msg_control = 0xf & (0xf << num_channels);
3125
3126 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3127 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3128 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3129 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3130 else
3131 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3132 }
3133
3134 brw_inst_set_dp_msg_type(devinfo, insn,
3135 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
3136 } else {
3137 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3138 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3139 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3140 }
3141
3142 brw_inst_set_dp_msg_type(devinfo, insn,
3143 GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
3144 }
3145
3146 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3147 }
3148
3149 void
3150 brw_typed_surface_read(struct brw_codegen *p,
3151 struct brw_reg dst,
3152 struct brw_reg payload,
3153 struct brw_reg surface,
3154 unsigned msg_length,
3155 unsigned num_channels)
3156 {
3157 const struct brw_device_info *devinfo = p->devinfo;
3158 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3159 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3160 GEN6_SFID_DATAPORT_RENDER_CACHE);
3161 struct brw_inst *insn = brw_send_indirect_surface_message(
3162 p, sfid, dst, payload, surface, msg_length,
3163 brw_surface_payload_size(p, num_channels,
3164 devinfo->gen >= 8 || devinfo->is_haswell, false),
3165 true);
3166
3167 brw_set_dp_typed_surface_read_message(
3168 p, insn, num_channels);
3169 }
3170
3171 static void
3172 brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
3173 struct brw_inst *insn,
3174 unsigned num_channels)
3175 {
3176 const struct brw_device_info *devinfo = p->devinfo;
3177 /* Set mask of unused channels. */
3178 unsigned msg_control = 0xf & (0xf << num_channels);
3179
3180 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3181 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3182 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3183 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3184 else
3185 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3186 }
3187
3188 brw_inst_set_dp_msg_type(devinfo, insn,
3189 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);
3190
3191 } else {
3192 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3193 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3194 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3195 }
3196
3197 brw_inst_set_dp_msg_type(devinfo, insn,
3198 GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
3199 }
3200
3201 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3202 }
3203
3204 void
3205 brw_typed_surface_write(struct brw_codegen *p,
3206 struct brw_reg payload,
3207 struct brw_reg surface,
3208 unsigned msg_length,
3209 unsigned num_channels)
3210 {
3211 const struct brw_device_info *devinfo = p->devinfo;
3212 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3213 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3214 GEN6_SFID_DATAPORT_RENDER_CACHE);
3215 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3216 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3217 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3218 WRITEMASK_X : WRITEMASK_XYZW);
3219 struct brw_inst *insn = brw_send_indirect_surface_message(
3220 p, sfid, brw_writemask(brw_null_reg(), mask),
3221 payload, surface, msg_length, 0, true);
3222
3223 brw_set_dp_typed_surface_write_message(
3224 p, insn, num_channels);
3225 }
3226
3227 static void
3228 brw_set_memory_fence_message(struct brw_codegen *p,
3229 struct brw_inst *insn,
3230 enum brw_message_target sfid,
3231 bool commit_enable)
3232 {
3233 const struct brw_device_info *devinfo = p->devinfo;
3234
3235 brw_set_message_descriptor(p, insn, sfid,
3236 1 /* message length */,
3237 (commit_enable ? 1 : 0) /* response length */,
3238 true /* header present */,
3239 false);
3240
3241 switch (sfid) {
3242 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3243 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3244 break;
3245 case GEN7_SFID_DATAPORT_DATA_CACHE:
3246 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3247 break;
3248 default:
3249 unreachable("Not reached");
3250 }
3251
3252 if (commit_enable)
3253 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3254 }
3255
3256 void
3257 brw_memory_fence(struct brw_codegen *p,
3258 struct brw_reg dst)
3259 {
3260 const struct brw_device_info *devinfo = p->devinfo;
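   /* Only IVB needs a committed write: the stalling MOV at the end of this
    * function reads the commit response of the second fence, so the fences
    * must send something back. HSW and later skip that dance entirely.
    */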
3261 const bool commit_enable = devinfo->gen == 7 && !devinfo->is_haswell;
3262 struct brw_inst *insn;
3263
3264 brw_push_insn_state(p);
3265 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3266 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3267 dst = vec1(dst);
3268
3269    /* Set dst as the destination for dependency tracking; the MEMORY_FENCE
3270     * message doesn't write anything back.
3271 */
3272 insn = next_insn(p, BRW_OPCODE_SEND);
3273 dst = retype(dst, BRW_REGISTER_TYPE_UW);
3274 brw_set_dest(p, insn, dst);
3275 brw_set_src0(p, insn, dst);
3276 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3277 commit_enable);
3278
3279 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3280 /* IVB does typed surface access through the render cache, so we need to
3281 * flush it too. Use a different register so both flushes can be
3282 * pipelined by the hardware.
3283 */
3284 insn = next_insn(p, BRW_OPCODE_SEND);
3285 brw_set_dest(p, insn, offset(dst, 1));
3286 brw_set_src0(p, insn, offset(dst, 1));
3287 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3288 commit_enable);
3289
3290 /* Now write the response of the second message into the response of the
3291 * first to trigger a pipeline stall -- This way future render and data
3292 * cache messages will be properly ordered with respect to past data and
3293 * render cache messages.
3294 */
      brw_MOV(p, dst, offset(dst, 1));
   }

   brw_pop_insn_state(p);
}

void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;
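   /* Note that brw_inst_exec_size() returns the encoded exec size, not a
    * channel count, so comparing against BRW_EXECUTE_16 below is the right
    * test for SIMD16 dispatch.
    */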
   const uint16_t exec_size = brw_inst_exec_size(devinfo, p->current);

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually an immediate.
    */
   insn = brw_send_indirect_message(p,
                                    GEN7_SFID_PIXEL_INTERPOLATOR,
                                    dest,
                                    mrf,
                                    vec1(data));
   brw_inst_set_mlen(devinfo, insn, msg_length);
   brw_inst_set_rlen(devinfo, insn, response_length);

   brw_inst_set_pi_simd_mode(devinfo, insn, exec_size == BRW_EXECUTE_16);
   brw_inst_set_pi_slot_group(devinfo, insn, 0); /* zero unless 32/64px dispatch */
   brw_inst_set_pi_nopersp(devinfo, insn, noperspective);
   brw_inst_set_pi_message_type(devinfo, insn, mode);
}

void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
   const unsigned qtr_control = brw_inst_qtr_control(devinfo, p->current);
   brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);

   if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just
          * find the first bit set in the mask register.  The same register
          * exists on HSW already but it reads back as all ones when the
          * current instruction has execution masking disabled, so it's
          * kind of useless.
          */
         inst = brw_FBL(p, vec1(dst),
                        retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD));

         /* Quarter control has the effect of magically shifting the value
          * of this register so you'll get the first active channel relative
          * to the specified quarter control as a result.
          */
      } else {
         const struct brw_reg flag = brw_flag_reg(1, 0);

         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking
          * and a conditional modifier enabled in order to get the full
          * execution mask in f1.0.  We could use a single 32-wide move here
          * if it weren't for the hardware bug that causes channel enables
          * to be applied incorrectly to the second half of 32-wide
          * instructions on Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_flag_reg_nr(devinfo, inst, 1);
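            /* cvt() maps a channel count (1, 2, 4, 8, 16, 32) to 1..6, so
             * cvt(n) - 1 yields the hardware exec size encoding for n
             * channels.
             */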
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }

         /* Find the first bit set in the exec_size-wide portion of the
          * flag register that was updated by the last sequence of MOV
          * instructions.
          */
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination first without and then with execution
          * masking in order to find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
   } else {
      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         const unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
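         /* The hstride field encodes the stride in components as a power of
          * two biased by one (assuming the usual brw_reg stride encoding),
          * so the byte offset of component i is
          * i << (log2(type size) + hstride - 1), which is exactly the shift
          * amount computed below.
          */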
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, account for the difference if the source
          * register is above this limit.
          */
         if (offset >= limit)
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         brw_MOV(p, dst,
                 retype(brw_vec1_indirect(addr.subnr, offset % limit),
                        src.type));
      } else {
         /* In SIMD4x2 mode the index can only be zero or one; replicate it
          * to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword for
 * all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we want to just
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const unsigned sfid = (p->devinfo->gen >= 8 || p->devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(p->devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the given
    * offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_src1(p, send, brw_imm_ud(0));
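   /* The two-register header-less payload is assumed to carry the
    * shader_time buffer offset and the delta to add, matching the untyped
    * atomic payload layout built by the callers.
    */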
   brw_set_message_descriptor(p, send, sfid,
                              2 /* message length */,
                              0 /* response length */,
                              false /* header present */,
                              false /* end of thread */);
   brw_inst_set_binding_table_index(p->devinfo, send, surf_index);
   brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);

   brw_pop_insn_state(p);
}


/**
 * Emit the SEND message for a barrier
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());

   brw_set_message_descriptor(p, inst, BRW_SFID_MESSAGE_GATEWAY,
                              1 /* msg_length */,
                              0 /* response_length */,
                              false /* header_present */,
                              false /* end_of_thread */);

   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
}


/**
 * Emit the wait instruction for a barrier
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();

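   /* WAIT is assumed to stall the thread until the notification count in n0
    * becomes non-zero and then decrement it; the barrier message emitted by
    * brw_barrier() is what eventually bumps the count.
    */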
   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}