/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}

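/* Gen7+ has no real MRF file, so we pretend the top of the GRF file is MRF
 * space: rewrite an MRF brw_reg in place to the corresponding GRF.  No-op
 * on earlier platforms or for non-MRF registers.
 */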
static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}

/**
 * Convert a brw_reg_type enumeration value into the hardware representation.
 *
 * The hardware encoding may depend on whether the value is an immediate.
 */
unsigned
brw_reg_type_to_hw_type(const struct gen_device_info *devinfo,
                        enum brw_reg_type type, enum brw_reg_file file)
{
   if (file == BRW_IMMEDIATE_VALUE) {
      static const enum hw_imm_type hw_types[] = {
         [0 ... BRW_REGISTER_TYPE_LAST] = -1,
         [BRW_REGISTER_TYPE_UD] = BRW_HW_IMM_TYPE_UD,
         [BRW_REGISTER_TYPE_D]  = BRW_HW_IMM_TYPE_D,
         [BRW_REGISTER_TYPE_UW] = BRW_HW_IMM_TYPE_UW,
         [BRW_REGISTER_TYPE_W]  = BRW_HW_IMM_TYPE_W,
         [BRW_REGISTER_TYPE_F]  = BRW_HW_IMM_TYPE_F,
         [BRW_REGISTER_TYPE_UV] = BRW_HW_IMM_TYPE_UV,
         [BRW_REGISTER_TYPE_VF] = BRW_HW_IMM_TYPE_VF,
         [BRW_REGISTER_TYPE_V]  = BRW_HW_IMM_TYPE_V,
         [BRW_REGISTER_TYPE_DF] = GEN8_HW_IMM_TYPE_DF,
         [BRW_REGISTER_TYPE_HF] = GEN8_HW_IMM_TYPE_HF,
         [BRW_REGISTER_TYPE_UQ] = GEN8_HW_IMM_TYPE_UQ,
         [BRW_REGISTER_TYPE_Q]  = GEN8_HW_IMM_TYPE_Q,
      };
      assert(type < ARRAY_SIZE(hw_types));
      assert(hw_types[type] != -1);
      return hw_types[type];
   } else {
      /* Non-immediate registers */
      static const enum hw_reg_type hw_types[] = {
         [0 ... BRW_REGISTER_TYPE_LAST] = -1,
         [BRW_REGISTER_TYPE_UD] = BRW_HW_REG_TYPE_UD,
         [BRW_REGISTER_TYPE_D]  = BRW_HW_REG_TYPE_D,
         [BRW_REGISTER_TYPE_UW] = BRW_HW_REG_TYPE_UW,
         [BRW_REGISTER_TYPE_W]  = BRW_HW_REG_TYPE_W,
         [BRW_REGISTER_TYPE_UB] = BRW_HW_REG_TYPE_UB,
         [BRW_REGISTER_TYPE_B]  = BRW_HW_REG_TYPE_B,
         [BRW_REGISTER_TYPE_F]  = BRW_HW_REG_TYPE_F,
         [BRW_REGISTER_TYPE_DF] = GEN7_HW_REG_TYPE_DF,
         [BRW_REGISTER_TYPE_HF] = GEN8_HW_REG_TYPE_HF,
         [BRW_REGISTER_TYPE_UQ] = GEN8_HW_REG_TYPE_UQ,
         [BRW_REGISTER_TYPE_Q]  = GEN8_HW_REG_TYPE_Q,
      };
      assert(type < ARRAY_SIZE(hw_types));
      assert(hw_types[type] != -1);
      return hw_types[type];
   }
}

/**
 * Return the element size given a hardware register type and file.
 *
 * The hardware encoding may depend on whether the value is an immediate.
 */
unsigned
brw_hw_reg_type_to_size(const struct gen_device_info *devinfo,
                        unsigned type, enum brw_reg_file file)
{
   if (file == BRW_IMMEDIATE_VALUE) {
      static const int hw_sizes[] = {
         [0 ... 15] = -1,
         [BRW_HW_IMM_TYPE_UD]  = 4,
         [BRW_HW_IMM_TYPE_D]   = 4,
         [BRW_HW_IMM_TYPE_UW]  = 2,
         [BRW_HW_IMM_TYPE_W]   = 2,
         [BRW_HW_IMM_TYPE_UV]  = 2,
         [BRW_HW_IMM_TYPE_VF]  = 4,
         [BRW_HW_IMM_TYPE_V]   = 2,
         [BRW_HW_IMM_TYPE_F]   = 4,
         [GEN8_HW_IMM_TYPE_UQ] = 8,
         [GEN8_HW_IMM_TYPE_Q]  = 8,
         [GEN8_HW_IMM_TYPE_DF] = 8,
         [GEN8_HW_IMM_TYPE_HF] = 2,
      };
      assert(type < ARRAY_SIZE(hw_sizes));
      assert(hw_sizes[type] != -1);
      return hw_sizes[type];
   } else {
      /* Non-immediate registers */
      static const int hw_sizes[] = {
         [0 ... 15] = -1,
         [BRW_HW_REG_TYPE_UD]  = 4,
         [BRW_HW_REG_TYPE_D]   = 4,
         [BRW_HW_REG_TYPE_UW]  = 2,
         [BRW_HW_REG_TYPE_W]   = 2,
         [BRW_HW_REG_TYPE_UB]  = 1,
         [BRW_HW_REG_TYPE_B]   = 1,
         [GEN7_HW_REG_TYPE_DF] = 8,
         [BRW_HW_REG_TYPE_F]   = 4,
         [GEN8_HW_REG_TYPE_UQ] = 8,
         [GEN8_HW_REG_TYPE_Q]  = 8,
         [GEN8_HW_REG_TYPE_HF] = 2,
      };
      assert(type < ARRAY_SIZE(hw_sizes));
      assert(hw_sizes[type] != -1);
      return hw_sizes[type];
   }
}

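/**
 * Encode a destination register into an instruction: register file, type,
 * address mode, register/subregister numbers, and the region fields
 * (horizontal stride in Align1, writemask in Align16).
 */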
void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_reg_file(devinfo, inst, dest.file);
   brw_inst_set_dst_reg_type(devinfo, inst,
                             brw_reg_type_to_hw_type(devinfo, dest.type,
                                                     dest.file));
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          *    Although Dst.HorzStride is a don't care for Align16, HW needs
          *    this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* Even though hstride is ignored in da16 mode, it still needs to
          * be set as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, we automatically reduce it to match the register size.
    *
    * In platforms that support fp64 we can emit instructions with a width of
    * 4 that need two SIMD8 registers and an exec_size of 8 or 16. In these
    * cases we need to make sure that these instructions have their exec sizes
    * set properly when they are emitted and we can't rely on this code to fix
    * it.
    */
   bool fix_exec_size;
   if (devinfo->gen >= 6)
      fix_exec_size = dest.width < BRW_EXECUTE_4;
   else
      fix_exec_size = dest.width < BRW_EXECUTE_8;

   if (fix_exec_size)
      brw_inst_set_exec_size(devinfo, inst, dest.width);
}

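/**
 * Sanity-check a register region against the instruction's execution size,
 * following the "Register Region Restrictions" rules quoted from the PRM
 * below.  Immediates and the null register are exempt.
 */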
static void
validate_reg(const struct gen_device_info *devinfo,
             brw_inst *inst, struct brw_reg reg)
{
   const int hstride_for_reg[] = {0, 1, 2, 4};
   const int vstride_for_reg[] = {0, 1, 2, 4, 8, 16, 32};
   const int width_for_reg[] = {1, 2, 4, 8, 16};
   const int execsize_for_reg[] = {1, 2, 4, 8, 16, 32};
   int width, hstride, vstride, execsize;

   if (reg.file == BRW_IMMEDIATE_VALUE)
      return;

   if (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
       reg.nr == BRW_ARF_NULL)
      return;

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Swizzling is not allowed when an accumulator is used as an implicit
    *    source or an explicit source in an instruction."
    */
   if (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
       reg.nr == BRW_ARF_ACCUMULATOR)
      assert(reg.swizzle == BRW_SWIZZLE_XYZW);

   assert(reg.hstride < ARRAY_SIZE(hstride_for_reg));
   hstride = hstride_for_reg[reg.hstride];

   if (reg.vstride == 0xf) {
      vstride = -1;
   } else {
      assert(reg.vstride >= 0 && reg.vstride < ARRAY_SIZE(vstride_for_reg));
      vstride = vstride_for_reg[reg.vstride];
   }

   assert(reg.width >= 0 && reg.width < ARRAY_SIZE(width_for_reg));
   width = width_for_reg[reg.width];

   assert(brw_inst_exec_size(devinfo, inst) >= 0 &&
          brw_inst_exec_size(devinfo, inst) < ARRAY_SIZE(execsize_for_reg));
   execsize = execsize_for_reg[brw_inst_exec_size(devinfo, inst)];

   /* Restrictions from 3.3.10: Register Region Restrictions. */
   /* 3. */
   assert(execsize >= width);

   /* 4. */
   if (execsize == width && hstride != 0) {
      assert(vstride == -1 || vstride == width * hstride);
   }

   /* 5. */
   if (execsize == width && hstride == 0) {
      /* no restriction on vstride. */
   }

   /* 6. */
   if (width == 1) {
      assert(hstride == 0);
   }

   /* 7. */
   if (execsize == 1 && width == 1) {
      assert(hstride == 0);
      assert(vstride == 0);
   }

   /* 8. */
   if (vstride == 0 && hstride == 0) {
      assert(width == 1);
   }

   /* 10. Check destination issues. */
}

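/**
 * Encode source operand 0.  For immediates this stores the payload (and,
 * for immediates narrower than 8 bytes, also mirrors src0's type into the
 * src1 fields); otherwise it encodes the register region for Align1 or the
 * swizzle for Align16 mode.
 */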
void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   validate_reg(devinfo, inst, reg);

   brw_inst_set_src0_reg_file(devinfo, inst, reg.file);
   brw_inst_set_src0_reg_type(devinfo, inst,
                              brw_reg_type_to_hw_type(devinfo, reg.type, reg.file));
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_type(devinfo, inst,
                                    brw_inst_src0_reg_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity arising from using the same region
             * descriptions for registers in align_16 as in align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}


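/**
 * Encode source operand 1.  Unlike src0 this may hold a 32-bit immediate,
 * but it may not be an accumulator or an MRF, and only direct addressing
 * is supported.
 */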
void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *    operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   validate_reg(devinfo, inst, reg);

   brw_inst_set_src1_reg_file(devinfo, inst, reg.file);
   brw_inst_set_src1_reg_type(devinfo, inst,
                              brw_reg_type_to_hw_type(devinfo, reg.type, reg.file));
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity arising from using the same region
             * descriptions for registers in align_16 as in align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Set the Message Descriptor and Extended Message Descriptor fields
 * for SEND messages.
 *
 * \note This zeroes out the Function Control bits, so it must be called
 *       \b before filling out any message-specific data.  Callers can
 *       choose not to fill in irrelevant bits; they will be zero.
 */
void
brw_set_message_descriptor(struct brw_codegen *p,
                           brw_inst *inst,
                           enum brw_message_target sfid,
                           unsigned msg_length,
                           unsigned response_length,
                           bool header_present,
                           bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_src1(p, inst, brw_imm_d(0));

   /* For indirect sends, `inst` will not be the SEND/SENDC instruction
    * itself; instead, it will be a MOV/OR into the address register.
    *
    * In this case, we avoid setting the extended message descriptor bits,
    * since they go on the later SEND/SENDC instead and if set here would
    * instead clobber the conditionalmod bits.
    */
   unsigned opcode = brw_inst_opcode(devinfo, inst);
   if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC) {
      brw_inst_set_sfid(devinfo, inst, sfid);
   }

   brw_inst_set_mlen(devinfo, inst, msg_length);
   brw_inst_set_rlen(devinfo, inst, response_length);
   brw_inst_set_eot(devinfo, inst, end_of_thread);

   if (devinfo->gen >= 5) {
      brw_inst_set_header_present(devinfo, inst, header_present);
   }
}

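/* Fill out the descriptor for a message to the extended math shared
 * function.  Message and response lengths follow from the math function:
 * POW and the INT_DIV variants take two source operands, while SINCOS and
 * INT_DIV_QUOTIENT_AND_REMAINDER return two results.
 */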
static void brw_set_math_message(struct brw_codegen *p,
                                 brw_inst *inst,
                                 unsigned function,
                                 unsigned integer_type,
                                 bool low_precision,
                                 unsigned dataType)
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_message_descriptor(p, inst, BRW_SFID_MATH,
                              msg_length, response_length, false, false);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}

static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              1, response_length, true, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message(struct brw_codegen *p,
                                brw_inst *insn,
                                enum brw_urb_write_flags flags,
                                unsigned msg_length,
                                unsigned response_length,
                                unsigned offset,
                                unsigned swizzle_control)
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              msg_length, response_length, true,
                              flags & BRW_URB_WRITE_EOT);

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

void
brw_set_dp_write_message(struct brw_codegen *p,
                         brw_inst *insn,
                         unsigned binding_table_index,
                         unsigned msg_control,
                         unsigned msg_type,
                         unsigned target_cache,
                         unsigned msg_length,
                         bool header_present,
                         unsigned last_render_target,
                         unsigned response_length,
                         unsigned end_of_thread,
                         unsigned send_commit_msg)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_WRITE);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, end_of_thread);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_write_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_write_msg_control(devinfo, insn, msg_control);
   brw_inst_set_rt_last(devinfo, insn, last_render_target);
   if (devinfo->gen < 7) {
      brw_inst_set_dp_write_commit(devinfo, insn, send_commit_msg);
   }
}

void
brw_set_dp_read_message(struct brw_codegen *p,
                        brw_inst *insn,
                        unsigned binding_table_index,
                        unsigned msg_control,
                        unsigned msg_type,
                        unsigned target_cache,
                        unsigned msg_length,
                        bool header_present,
                        unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_READ);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, false);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_read_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_read_msg_control(devinfo, insn, msg_control);
   if (devinfo->gen < 6)
      brw_inst_set_dp_read_target_cache(devinfo, insn, target_cache);
}

void
brw_set_sampler_message(struct brw_codegen *p,
                        brw_inst *inst,
                        unsigned binding_table_index,
                        unsigned sampler,
                        unsigned msg_type,
                        unsigned response_length,
                        unsigned msg_length,
                        unsigned header_present,
                        unsigned simd_mode,
                        unsigned return_format)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER, msg_length,
                              response_length, header_present, false);

   brw_inst_set_binding_table_index(devinfo, inst, binding_table_index);
   brw_inst_set_sampler(devinfo, inst, sampler);
   brw_inst_set_sampler_msg_type(devinfo, inst, msg_type);
   if (devinfo->gen >= 5) {
      brw_inst_set_sampler_simd_mode(devinfo, inst, simd_mode);
   } else if (devinfo->gen == 4 && !devinfo->is_g4x) {
      brw_inst_set_sampler_return_format(devinfo, inst, return_format);
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_message_descriptor(p, inst, GEN7_SFID_DATAPORT_DATA_CACHE,
                              mlen, rlen, header_present, false);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}

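/* Allocate the next instruction slot, doubling the store when full.  Note
 * that p->store may be reallocated here, so callers must not hold brw_inst
 * pointers across a call (see the comment in brw_ENDIF).  The new
 * instruction starts as a copy of the current default state (p->current)
 * and then has its opcode set.
 */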
#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];
   memcpy(insn, p->current, sizeof(*insn));

   brw_inst_set_opcode(devinfo, insn, opcode);
   return insn;
}

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}

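/* Emit a three-source instruction (MAD, LRP, BFE, BFI2, ...).  These use a
 * separate encoding: Align16 only, direct-addressed GRF sources, and on
 * Gen7+ a single type field for all operands, derived from the destination
 * type.
 */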
static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16);

   assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
          dest.file == BRW_MESSAGE_REGISTER_FILE);
   assert(dest.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(dest.type == BRW_REGISTER_TYPE_F ||
          dest.type == BRW_REGISTER_TYPE_DF ||
          dest.type == BRW_REGISTER_TYPE_D ||
          dest.type == BRW_REGISTER_TYPE_UD);
   if (devinfo->gen == 6) {
      brw_inst_set_3src_dst_reg_file(devinfo, inst,
                                     dest.file == BRW_MESSAGE_REGISTER_FILE);
   }
   brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
   brw_inst_set_3src_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
   brw_inst_set_3src_dst_writemask(devinfo, inst, dest.writemask);

   assert(src0.file == BRW_GENERAL_REGISTER_FILE);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.nr < 128);
   brw_inst_set_3src_src0_swizzle(devinfo, inst, src0.swizzle);
   brw_inst_set_3src_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
   brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
   brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
   brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
   brw_inst_set_3src_src0_rep_ctrl(devinfo, inst,
                                   src0.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src1.file == BRW_GENERAL_REGISTER_FILE);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.nr < 128);
   brw_inst_set_3src_src1_swizzle(devinfo, inst, src1.swizzle);
   brw_inst_set_3src_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
   brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
   brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
   brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
   brw_inst_set_3src_src1_rep_ctrl(devinfo, inst,
                                   src1.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src2.file == BRW_GENERAL_REGISTER_FILE);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.nr < 128);
   brw_inst_set_3src_src2_swizzle(devinfo, inst, src2.swizzle);
   brw_inst_set_3src_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
   brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
   brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
   brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
   brw_inst_set_3src_src2_rep_ctrl(devinfo, inst,
                                   src2.vstride == BRW_VERTICAL_STRIDE_0);

   if (devinfo->gen >= 7) {
      /* Set both the source and destination types based on dest.type,
       * ignoring the source register types.  The MAD and LRP emitters ensure
       * that all four types are float.  The BFE and BFI2 emitters, however,
       * may send us mixed D and UD types and want us to ignore that and use
       * the destination type.
       */
      switch (dest.type) {
      case BRW_REGISTER_TYPE_F:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_F);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_F);
         break;
      case BRW_REGISTER_TYPE_DF:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_DF);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_DF);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_D);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_D);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_UD);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_UD);
         break;
      default:
         unreachable("not reached");
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
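/* The ALU1/ALU2/ALU3 macros below stamp out thin, uniformly-shaped
 * wrappers around brw_alu1/2/3.  For example, ALU2(ADD) expands to:
 *
 *    brw_inst *brw_ADD(struct brw_codegen *p,
 *                      struct brw_reg dest,
 *                      struct brw_reg src0,
 *                      struct brw_reg src1)
 *    {
 *       return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
 *    }
 *
 * ALU3F additionally asserts that all operands match the destination's
 * float type (F or DF).
 */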
#define ALU1(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0)                     \
{                                                           \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);         \
}

#define ALU2(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0,                     \
                   struct brw_reg src1)                     \
{                                                           \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);   \
}

#define ALU3(OP)                                            \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0,                     \
                   struct brw_reg src1,                     \
                   struct brw_reg src2)                     \
{                                                           \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

#define ALU3F(OP)                                           \
brw_inst *brw_##OP(struct brw_codegen *p,                   \
                   struct brw_reg dest,                     \
                   struct brw_reg src0,                     \
                   struct brw_reg src1,                     \
                   struct brw_reg src2)                     \
{                                                           \
   assert(dest.type == BRW_REGISTER_TYPE_F ||               \
          dest.type == BRW_REGISTER_TYPE_DF);               \
   if (dest.type == BRW_REGISTER_TYPE_F) {                  \
      assert(src0.type == BRW_REGISTER_TYPE_F);             \
      assert(src1.type == BRW_REGISTER_TYPE_F);             \
      assert(src2.type == BRW_REGISTER_TYPE_F);             \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {          \
      assert(src0.type == BRW_REGISTER_TYPE_DF);            \
      assert(src1.type == BRW_REGISTER_TYPE_DF);            \
      assert(src2.type == BRW_REGISTER_TYPE_DF);            \
   }                                                        \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                  \
void brw_##OP(struct brw_codegen *p,                               \
              struct brw_reg dest,                                 \
              struct brw_reg src)                                  \
{                                                                  \
   const struct gen_device_info *devinfo = p->devinfo;             \
   brw_inst *rnd, *add;                                            \
   rnd = next_insn(p, BRW_OPCODE_##OP);                            \
   brw_set_dest(p, rnd, dest);                                     \
   brw_set_src0(p, rnd, src);                                      \
                                                                   \
   if (devinfo->gen < 6) {                                         \
      /* turn on round-increments */                               \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R); \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));               \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL); \
   }                                                               \
}


ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3F(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   Because this instruction does not have a 16-bit floating-point
       *   type, the source data type must be Word (W). The destination type
       *   must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}

void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}


/***********************************************************************
 * Comparisons, if/else/endif
 */

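/* Emit a JMPI (jump indexed): a scalar, optionally predicated jump whose
 * target is IP plus `index`.  The execution mask is disabled since the jump
 * applies to the whole thread rather than to individual channels.
 */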
brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_2);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

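/* The IF/ELSE and loop stacks store instruction *indices* rather than
 * pointers, since p->store may be reallocated while a block is still open
 * (see brw_next_insn).
 */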
static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* The EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, e.g. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn,
                          brw_inst_exec_size(devinfo, p->current));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

1597 void
1598 brw_ENDIF(struct brw_codegen *p)
1599 {
1600 const struct gen_device_info *devinfo = p->devinfo;
1601 brw_inst *insn = NULL;
1602 brw_inst *else_inst = NULL;
1603 brw_inst *if_inst = NULL;
1604 brw_inst *tmp;
1605 bool emit_endif = true;
1606
1607 /* In single program flow mode, we can express IF and ELSE instructions
1608 * equivalently as ADD instructions that operate on IP. On platforms prior
1609 * to Gen6, flow control instructions cause an implied thread switch, so
1610 * this is a significant savings.
1611 *
1612 * However, on Gen6, writing to IP doesn't work in single program flow mode
1613 * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
1614 * not be updated by non-flow control instructions."). And on later
1615 * platforms, there is no significant benefit to converting control flow
1616 * instructions to conditional ADDs. So we only do this trick on Gen4 and
1617 * Gen5.
1618 */
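   /* Conceptually, convert_IF_ELSE_to_ADD() (defined earlier in this file)
    * rewrites the structured branches as IP arithmetic, roughly:
    *
    *    (predicated) ADD ip, ip, <bytes to skip the then-block>   <- was IF
    *                 ADD ip, ip, <bytes to skip the else-block>   <- was ELSE
    *
    * so that no ENDIF instruction needs to be emitted at all.
    */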
1619 if (devinfo->gen < 6 && p->single_program_flow)
1620 emit_endif = false;
1621
1622    /*
1623     * A single next_insn() may change the base address of the instruction
1624     * store (p->store), so call it first, before deriving any instruction
1625     * pointers from indices into the store.
1626     */
1627 if (emit_endif)
1628 insn = next_insn(p, BRW_OPCODE_ENDIF);
1629
1630 /* Pop the IF and (optional) ELSE instructions from the stack */
1631 p->if_depth_in_loop[p->loop_stack_depth]--;
1632 tmp = pop_if_stack(p);
1633 if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
1634 else_inst = tmp;
1635 tmp = pop_if_stack(p);
1636 }
1637 if_inst = tmp;
1638
1639 if (!emit_endif) {
1640 /* ENDIF is useless; don't bother emitting it. */
1641 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1642 return;
1643 }
1644
1645 if (devinfo->gen < 6) {
1646 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1647 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1648 brw_set_src1(p, insn, brw_imm_d(0x0));
1649 } else if (devinfo->gen == 6) {
1650 brw_set_dest(p, insn, brw_imm_w(0));
1651 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1652 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1653 } else if (devinfo->gen == 7) {
1654 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1655 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1656 brw_set_src1(p, insn, brw_imm_w(0));
1657 } else {
1658 brw_set_src0(p, insn, brw_imm_d(0));
1659 }
1660
1661 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1662 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
1663 if (devinfo->gen < 6)
1664 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1665
1666 /* Also pop item off the stack in the endif instruction: */
1667 if (devinfo->gen < 6) {
1668 brw_inst_set_gen4_jump_count(devinfo, insn, 0);
1669 brw_inst_set_gen4_pop_count(devinfo, insn, 1);
1670 } else if (devinfo->gen == 6) {
1671 brw_inst_set_gen6_jump_count(devinfo, insn, 2);
1672 } else {
1673 brw_inst_set_jip(devinfo, insn, 2);
1674 }
1675 patch_IF_ELSE(p, if_inst, else_inst, insn);
1676 }
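/* A typical structured-control-flow sequence, as a sketch (brw_IF() is
 * defined earlier in this file):
 *
 *    brw_IF(p, BRW_EXECUTE_8);
 *    ...                         <- then-block
 *    brw_ELSE(p);                <- optional
 *    ...                         <- else-block
 *    brw_ENDIF(p);               <- pops the stack and patches IF/ELSE
 */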
1677
1678 brw_inst *
1679 brw_BREAK(struct brw_codegen *p)
1680 {
1681 const struct gen_device_info *devinfo = p->devinfo;
1682 brw_inst *insn;
1683
1684 insn = next_insn(p, BRW_OPCODE_BREAK);
1685 if (devinfo->gen >= 8) {
1686 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1687 brw_set_src0(p, insn, brw_imm_d(0x0));
1688 } else if (devinfo->gen >= 6) {
1689 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1690 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1691 brw_set_src1(p, insn, brw_imm_d(0x0));
1692 } else {
1693 brw_set_dest(p, insn, brw_ip_reg());
1694 brw_set_src0(p, insn, brw_ip_reg());
1695 brw_set_src1(p, insn, brw_imm_d(0x0));
1696 brw_inst_set_gen4_pop_count(devinfo, insn,
1697 p->if_depth_in_loop[p->loop_stack_depth]);
1698 }
1699 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1700 brw_inst_set_exec_size(devinfo, insn,
1701 brw_inst_exec_size(devinfo, p->current));
1702
1703 return insn;
1704 }
1705
1706 brw_inst *
1707 brw_CONT(struct brw_codegen *p)
1708 {
1709 const struct gen_device_info *devinfo = p->devinfo;
1710 brw_inst *insn;
1711
1712 insn = next_insn(p, BRW_OPCODE_CONTINUE);
1713 brw_set_dest(p, insn, brw_ip_reg());
1714 if (devinfo->gen >= 8) {
1715 brw_set_src0(p, insn, brw_imm_d(0x0));
1716 } else {
1717 brw_set_src0(p, insn, brw_ip_reg());
1718 brw_set_src1(p, insn, brw_imm_d(0x0));
1719 }
1720
1721 if (devinfo->gen < 6) {
1722 brw_inst_set_gen4_pop_count(devinfo, insn,
1723 p->if_depth_in_loop[p->loop_stack_depth]);
1724 }
1725 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1726 brw_inst_set_exec_size(devinfo, insn,
1727 brw_inst_exec_size(devinfo, p->current));
1728 return insn;
1729 }
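/* Note that neither brw_BREAK() nor brw_CONT() knows its jump target when
 * emitted; the targets are patched in afterwards, by brw_patch_break_cont()
 * on pre-gen6 and by brw_set_uip_jip() on gen6+ (both below).
 */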
1730
1731 brw_inst *
1732 gen6_HALT(struct brw_codegen *p)
1733 {
1734 const struct gen_device_info *devinfo = p->devinfo;
1735 brw_inst *insn;
1736
1737 insn = next_insn(p, BRW_OPCODE_HALT);
1738 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1739 if (devinfo->gen >= 8) {
1740 brw_set_src0(p, insn, brw_imm_d(0x0));
1741 } else {
1742 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1743 brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
1744 }
1745
1746 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1747 brw_inst_set_exec_size(devinfo, insn,
1748 brw_inst_exec_size(devinfo, p->current));
1749 return insn;
1750 }
1751
1752 /* DO/WHILE loop:
1753 *
1754  * The DO/WHILE is just an unterminated loop -- break or continue are
1755  * used for control within the loop.  There are a few ways it can be
1756  * implemented:
1757 *
1758 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
1759 * jip and no DO instruction.
1760 *
1761 * For non-uniform control flow pre-gen6, there's a DO instruction to
1762 * push the mask, and a WHILE to jump back, and BREAK to get out and
1763 * pop the mask.
1764 *
1765 * For gen6, there's no more mask stack, so no need for DO. WHILE
1766 * just points back to the first instruction of the loop.
1767 */
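/* A minimal loop-emission sketch:
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *    ...                         <- loop body, may emit brw_BREAK()/brw_CONT()
 *    brw_WHILE(p);               <- jumps back to the top of the loop
 */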
1768 brw_inst *
1769 brw_DO(struct brw_codegen *p, unsigned execute_size)
1770 {
1771 const struct gen_device_info *devinfo = p->devinfo;
1772
1773 if (devinfo->gen >= 6 || p->single_program_flow) {
1774 push_loop_stack(p, &p->store[p->nr_insn]);
1775 return &p->store[p->nr_insn];
1776 } else {
1777 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1778
1779 push_loop_stack(p, insn);
1780
1781 /* Override the defaults for this instruction:
1782 */
1783 brw_set_dest(p, insn, brw_null_reg());
1784 brw_set_src0(p, insn, brw_null_reg());
1785 brw_set_src1(p, insn, brw_null_reg());
1786
1787 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1788 brw_inst_set_exec_size(devinfo, insn, execute_size);
1789 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
1790
1791 return insn;
1792 }
1793 }
1794
1795 /**
1796 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1797 * instruction here.
1798 *
1799 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1800 * nesting, since it can always just point to the end of the block/current loop.
1801  * nesting, since it can always just point to the end of the block/current loop. */
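/* Worked example, assuming gen5 (br == 2): a BREAK three instructions
 * before the WHILE gets jump count 2 * (3 + 1) = 8, landing just past the
 * WHILE, while a CONTINUE in the same spot gets 2 * 3 = 6, landing on the
 * WHILE itself.
 */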
1802 static void
1803 brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
1804 {
1805 const struct gen_device_info *devinfo = p->devinfo;
1806 brw_inst *do_inst = get_inner_do_insn(p);
1807 brw_inst *inst;
1808 unsigned br = brw_jump_scale(devinfo);
1809
1810 assert(devinfo->gen < 6);
1811
1812 for (inst = while_inst - 1; inst != do_inst; inst--) {
1813 /* If the jump count is != 0, this instruction has already been
1814  * patched because it's part of a loop nested inside the one we're
1815  * patching.
1816  */
1817 if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
1818 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1819 brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
1820 } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
1821 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
1822 brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
1823 }
1824 }
1825 }
1826
1827 brw_inst *
1828 brw_WHILE(struct brw_codegen *p)
1829 {
1830 const struct gen_device_info *devinfo = p->devinfo;
1831 brw_inst *insn, *do_insn;
1832 unsigned br = brw_jump_scale(devinfo);
1833
1834 if (devinfo->gen >= 6) {
1835 insn = next_insn(p, BRW_OPCODE_WHILE);
1836 do_insn = get_inner_do_insn(p);
1837
1838 if (devinfo->gen >= 8) {
1839 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1840 brw_set_src0(p, insn, brw_imm_d(0));
1841 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1842 } else if (devinfo->gen == 7) {
1843 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1844 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1845 brw_set_src1(p, insn, brw_imm_w(0));
1846 brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
1847 } else {
1848 brw_set_dest(p, insn, brw_imm_w(0));
1849 brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
1850 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1851 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1852 }
1853
1854 brw_inst_set_exec_size(devinfo, insn,
1855 brw_inst_exec_size(devinfo, p->current));
1856
1857 } else {
1858 if (p->single_program_flow) {
1859 insn = next_insn(p, BRW_OPCODE_ADD);
1860 do_insn = get_inner_do_insn(p);
1861
1862 brw_set_dest(p, insn, brw_ip_reg());
1863 brw_set_src0(p, insn, brw_ip_reg());
1864 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1865 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
1866 } else {
1867 insn = next_insn(p, BRW_OPCODE_WHILE);
1868 do_insn = get_inner_do_insn(p);
1869
1870 assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);
1871
1872 brw_set_dest(p, insn, brw_ip_reg());
1873 brw_set_src0(p, insn, brw_ip_reg());
1874 brw_set_src1(p, insn, brw_imm_d(0));
1875
1876 brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
1877 brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
1878 brw_inst_set_gen4_pop_count(devinfo, insn, 0);
1879
1880 brw_patch_break_cont(p, insn);
1881 }
1882 }
1883 brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
1884
1885 p->loop_stack_depth--;
1886
1887 return insn;
1888 }
1889
1890 /* FORWARD JUMPS:
1891 */
1892 void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
1893 {
1894 const struct gen_device_info *devinfo = p->devinfo;
1895 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1896 unsigned jmpi = 1;
1897
1898 if (devinfo->gen >= 5)
1899 jmpi = 2;
1900
1901 assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
1902 assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);
1903
1904 brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
1905 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1906 }
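/* A hedged usage sketch: emit the JMPI, remember its index, emit the
 * instructions to be skipped, then land the jump at the current end of the
 * program:
 *
 *    int jmp = p->nr_insn;
 *    brw_JMPI(p, brw_imm_d(0), BRW_PREDICATE_NORMAL);
 *    ...                         <- instructions to jump over
 *    brw_land_fwd_jump(p, jmp);
 */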
1907
1908 /* To integrate with the above, it makes sense that the comparison
1909 * instruction should populate the flag register. It might be simpler
1910 * just to use the flag reg for most WM tasks?
1911 */
1912 void brw_CMP(struct brw_codegen *p,
1913 struct brw_reg dest,
1914 unsigned conditional,
1915 struct brw_reg src0,
1916 struct brw_reg src1)
1917 {
1918 const struct gen_device_info *devinfo = p->devinfo;
1919 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1920
1921 brw_inst_set_cond_modifier(devinfo, insn, conditional);
1922 brw_set_dest(p, insn, dest);
1923 brw_set_src0(p, insn, src0);
1924 brw_set_src1(p, insn, src1);
1925
1926 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1927 * page says:
1928 * "Any CMP instruction with a null destination must use a {switch}."
1929 *
1930 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1931 * mentioned on their work-arounds pages.
1932 */
1933 if (devinfo->gen == 7) {
1934 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1935 dest.nr == BRW_ARF_NULL) {
1936 brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
1937 }
1938 }
1939 }
1940
1941 /***********************************************************************
1942 * Helpers for the various SEND message types:
1943 */
1944
1945 /** Extended math function, float[8].
1946 */
1947 void gen4_math(struct brw_codegen *p,
1948 struct brw_reg dest,
1949 unsigned function,
1950 unsigned msg_reg_nr,
1951 struct brw_reg src,
1952 unsigned precision )
1953 {
1954 const struct gen_device_info *devinfo = p->devinfo;
1955 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1956 unsigned data_type;
1957 if (has_scalar_region(src)) {
1958 data_type = BRW_MATH_DATA_SCALAR;
1959 } else {
1960 data_type = BRW_MATH_DATA_VECTOR;
1961 }
1962
1963 assert(devinfo->gen < 6);
1964
1965 /* Example code doesn't set predicate_control for send
1966 * instructions.
1967 */
1968 brw_inst_set_pred_control(devinfo, insn, 0);
1969 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
1970
1971 brw_set_dest(p, insn, dest);
1972 brw_set_src0(p, insn, src);
1973 brw_set_math_message(p,
1974 insn,
1975 function,
1976 src.type == BRW_REGISTER_TYPE_D,
1977 precision,
1978 data_type);
1979 }
1980
1981 void gen6_math(struct brw_codegen *p,
1982 struct brw_reg dest,
1983 unsigned function,
1984 struct brw_reg src0,
1985 struct brw_reg src1)
1986 {
1987 const struct gen_device_info *devinfo = p->devinfo;
1988 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1989
1990 assert(devinfo->gen >= 6);
1991
1992 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1993 (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1994
1995 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1996 if (devinfo->gen == 6) {
1997 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1998 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1999 }
2000
2001 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
2002 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
2003 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
2004 assert(src0.type != BRW_REGISTER_TYPE_F);
2005 assert(src1.type != BRW_REGISTER_TYPE_F);
2006 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
2007 (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
2008 } else {
2009 assert(src0.type == BRW_REGISTER_TYPE_F);
2010 assert(src1.type == BRW_REGISTER_TYPE_F);
2011 }
2012
2013 /* Source modifiers are ignored for extended math instructions on Gen6. */
2014 if (devinfo->gen == 6) {
2015 assert(!src0.negate);
2016 assert(!src0.abs);
2017 assert(!src1.negate);
2018 assert(!src1.abs);
2019 }
2020
2021 brw_inst_set_math_function(devinfo, insn, function);
2022
2023 brw_set_dest(p, insn, dest);
2024 brw_set_src0(p, insn, src0);
2025 brw_set_src1(p, insn, src1);
2026 }
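/* E.g., a reciprocal, as a sketch (dst and src assumed to be F-typed GRFs):
 *
 *    gen6_math(p, dst, BRW_MATH_FUNCTION_INV, src, brw_null_reg());
 */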
2027
2028 /**
2029 * Return the right surface index to access the thread scratch space using
2030 * stateless dataport messages.
2031 */
2032 unsigned
2033 brw_scratch_surface_idx(const struct brw_codegen *p)
2034 {
2035 /* The scratch space is thread-local so IA coherency is unnecessary. */
2036 if (p->devinfo->gen >= 8)
2037 return GEN8_BTI_STATELESS_NON_COHERENT;
2038 else
2039 return BRW_BTI_STATELESS;
2040 }
2041
2042 /**
2043  * Write a block of OWORDs (half a GRF each) to the scratch buffer,
2044 * using a constant offset per channel.
2045 *
2046 * The offset must be aligned to oword size (16 bytes). Used for
2047 * register spilling.
2048 */
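/* E.g., spilling two GRFs to scratch offset 64, as a sketch (hypothetical
 * message register number):
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(10), 2, 64);
 */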
2049 void brw_oword_block_write_scratch(struct brw_codegen *p,
2050 struct brw_reg mrf,
2051 int num_regs,
2052 unsigned offset)
2053 {
2054 const struct gen_device_info *devinfo = p->devinfo;
2055 const unsigned target_cache =
2056 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2057 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2058 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2059 uint32_t msg_type;
2060
2061 if (devinfo->gen >= 6)
2062 offset /= 16;
2063
2064 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2065
2066 const unsigned mlen = 1 + num_regs;
2067
2068 /* Set up the message header. This is g0, with g0.2 filled with
2069 * the offset. We don't want to leave our offset around in g0 or
2070 * it'll screw up texture samples, so set it up inside the message
2071 * reg.
2072 */
2073 {
2074 brw_push_insn_state(p);
2075 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2076 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2077 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2078
2079 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2080
2081 /* set message header global offset field (reg 0, element 2) */
2082 brw_MOV(p,
2083 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2084 mrf.nr,
2085 2), BRW_REGISTER_TYPE_UD),
2086 brw_imm_ud(offset));
2087
2088 brw_pop_insn_state(p);
2089 }
2090
2091 {
2092 struct brw_reg dest;
2093 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2094 int send_commit_msg;
2095 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
2096 BRW_REGISTER_TYPE_UW);
2097
2098 brw_inst_set_compression(devinfo, insn, false);
2099
2100 if (brw_inst_exec_size(devinfo, insn) >= 16)
2101 src_header = vec16(src_header);
2102
2103 assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
2104 if (devinfo->gen < 6)
2105 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2106
2107 /* Until gen6, writes followed by reads from the same location
2108 * are not guaranteed to be ordered unless write_commit is set.
2109 * If set, then a no-op write is issued to the destination
2110 * register to set a dependency, and a read from the destination
2111 * can be used to ensure the ordering.
2112 *
2113 * For gen6, only writes between different threads need ordering
2114 * protection. Our use of DP writes is all about register
2115 * spilling within a thread.
2116 */
2117 if (devinfo->gen >= 6) {
2118 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2119 send_commit_msg = 0;
2120 } else {
2121 dest = src_header;
2122 send_commit_msg = 1;
2123 }
2124
2125 brw_set_dest(p, insn, dest);
2126 if (devinfo->gen >= 6) {
2127 brw_set_src0(p, insn, mrf);
2128 } else {
2129 brw_set_src0(p, insn, brw_null_reg());
2130 }
2131
2132 if (devinfo->gen >= 6)
2133 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2134 else
2135 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2136
2137 brw_set_dp_write_message(p,
2138 insn,
2139 brw_scratch_surface_idx(p),
2140 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2141 msg_type,
2142 target_cache,
2143 mlen,
2144 true, /* header_present */
2145 0, /* not a render target */
2146 send_commit_msg, /* response_length */
2147 0, /* eot */
2148 send_commit_msg);
2149 }
2150 }
2151
2152
2153 /**
2154 * Read a block of owords (half a GRF each) from the scratch buffer
2155  * using a constant offset per channel.
2156 *
2157 * Offset must be aligned to oword size (16 bytes). Used for register
2158 * spilling.
2159 */
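/* E.g., unspilling those two GRFs again, as a sketch (hypothetical register
 * numbers):
 *
 *    brw_oword_block_read_scratch(p, brw_vec8_grf(80, 0),
 *                                 brw_message_reg(10), 2, 64);
 */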
2160 void
2161 brw_oword_block_read_scratch(struct brw_codegen *p,
2162 struct brw_reg dest,
2163 struct brw_reg mrf,
2164 int num_regs,
2165 unsigned offset)
2166 {
2167 const struct gen_device_info *devinfo = p->devinfo;
2168
2169 if (devinfo->gen >= 6)
2170 offset /= 16;
2171
2172 if (p->devinfo->gen >= 7) {
2173 /* On gen 7 and above, we no longer have message registers and we can
2174 * send from any register we want. By using the destination register
2175 * for the message, we guarantee that the implied message write won't
2176 * accidentally overwrite anything. This has been a problem because
2177 * the MRF registers and source for the final FB write are both fixed
2178 * and may overlap.
2179 */
2180 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2181 } else {
2182 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2183 }
2184 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2185
2186 const unsigned rlen = num_regs;
2187 const unsigned target_cache =
2188 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2189 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2190 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2191
2192 {
2193 brw_push_insn_state(p);
2194 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2195 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2196 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2197
2198 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2199
2200 /* set message header global offset field (reg 0, element 2) */
2201 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2202
2203 brw_pop_insn_state(p);
2204 }
2205
2206 {
2207 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2208
2209 assert(brw_inst_pred_control(devinfo, insn) == 0);
2210 brw_inst_set_compression(devinfo, insn, false);
2211
2212 brw_set_dest(p, insn, dest); /* UW? */
2213 if (devinfo->gen >= 6) {
2214 brw_set_src0(p, insn, mrf);
2215 } else {
2216 brw_set_src0(p, insn, brw_null_reg());
2217 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2218 }
2219
2220 brw_set_dp_read_message(p,
2221 insn,
2222 brw_scratch_surface_idx(p),
2223 BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
2224 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
2225 target_cache,
2226 1, /* msg_length */
2227 true, /* header_present */
2228 rlen);
2229 }
2230 }
2231
2232 void
2233 gen7_block_read_scratch(struct brw_codegen *p,
2234 struct brw_reg dest,
2235 int num_regs,
2236 unsigned offset)
2237 {
2238 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2239 assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);
2240
2241 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2242
2243 /* The HW requires that the header is present; this is to get the g0.5
2244 * scratch offset.
2245 */
2246 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2247
2248 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2249 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2250 * is 32 bytes, which happens to be the size of a register.
2251 */
2252 offset /= REG_SIZE;
2253 assert(offset < (1 << 12));
2254
2255 gen7_set_dp_scratch_message(p, insn,
2256 false, /* scratch read */
2257 false, /* OWords */
2258 false, /* invalidate after read */
2259 num_regs,
2260 offset,
2261 1, /* mlen: just g0 */
2262 num_regs, /* rlen */
2263 true); /* header present */
2264 }
2265
2266 /**
2267 * Read float[4] vectors from the data port constant cache.
2268 * Location (in buffer) should be a multiple of 16.
2269 * Used for fetching shader constants.
2270 */
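/* E.g., fetching a block of constants, as a sketch (hypothetical binding
 * table slot and registers; the block size follows the default execution
 * size):
 *
 *    brw_oword_block_read(p, brw_vec4_grf(2, 0), brw_message_reg(1), 0, 12);
 */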
2271 void brw_oword_block_read(struct brw_codegen *p,
2272 struct brw_reg dest,
2273 struct brw_reg mrf,
2274 uint32_t offset,
2275 uint32_t bind_table_index)
2276 {
2277 const struct gen_device_info *devinfo = p->devinfo;
2278 const unsigned target_cache =
2279 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
2280 BRW_DATAPORT_READ_TARGET_DATA_CACHE);
2281 const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
2282
2283 /* On newer hardware, offset is in units of owords. */
2284 if (devinfo->gen >= 6)
2285 offset /= 16;
2286
2287 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2288
2289 brw_push_insn_state(p);
2290 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2291 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2292 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2293
2294 brw_push_insn_state(p);
2295 brw_set_default_exec_size(p, BRW_EXECUTE_8);
2296 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2297
2298 /* set message header global offset field (reg 0, element 2) */
2299 brw_MOV(p,
2300 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2301 mrf.nr,
2302 2), BRW_REGISTER_TYPE_UD),
2303 brw_imm_ud(offset));
2304 brw_pop_insn_state(p);
2305
2306 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2307
2308 /* cast dest to a uword[8] vector */
2309 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2310
2311 brw_set_dest(p, insn, dest);
2312 if (devinfo->gen >= 6) {
2313 brw_set_src0(p, insn, mrf);
2314 } else {
2315 brw_set_src0(p, insn, brw_null_reg());
2316 brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
2317 }
2318
2319 brw_set_dp_read_message(p, insn, bind_table_index,
2320 BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
2321 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2322 target_cache,
2323 1, /* msg_length */
2324 true, /* header_present */
2325 DIV_ROUND_UP(exec_size, 8)); /* response_length */
2326
2327 brw_pop_insn_state(p);
2328 }
2329
2330
2331 void brw_fb_WRITE(struct brw_codegen *p,
2332 struct brw_reg payload,
2333 struct brw_reg implied_header,
2334 unsigned msg_control,
2335 unsigned binding_table_index,
2336 unsigned msg_length,
2337 unsigned response_length,
2338 bool eot,
2339 bool last_render_target,
2340 bool header_present)
2341 {
2342 const struct gen_device_info *devinfo = p->devinfo;
2343 const unsigned target_cache =
2344 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2345 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2346 brw_inst *insn;
2347 unsigned msg_type;
2348 struct brw_reg dest, src0;
2349
2350 if (brw_inst_exec_size(devinfo, p->current) >= BRW_EXECUTE_16)
2351 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2352 else
2353 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2354
2355 if (devinfo->gen >= 6) {
2356 insn = next_insn(p, BRW_OPCODE_SENDC);
2357 } else {
2358 insn = next_insn(p, BRW_OPCODE_SEND);
2359 }
2360 brw_inst_set_compression(devinfo, insn, false);
2361
2362 if (devinfo->gen >= 6) {
2363 /* headerless version, just submit color payload */
2364 src0 = payload;
2365
2366 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2367 } else {
2368 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2369 brw_inst_set_base_mrf(devinfo, insn, payload.nr);
2370 src0 = implied_header;
2371
2372 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2373 }
2374
2375 brw_set_dest(p, insn, dest);
2376 brw_set_src0(p, insn, src0);
2377 brw_set_dp_write_message(p,
2378 insn,
2379 binding_table_index,
2380 msg_control,
2381 msg_type,
2382 target_cache,
2383 msg_length,
2384 header_present,
2385 last_render_target,
2386 response_length,
2387 eot,
2388 0 /* send_commit_msg */);
2389 }
2390
2391 brw_inst *
2392 gen9_fb_READ(struct brw_codegen *p,
2393 struct brw_reg dst,
2394 struct brw_reg payload,
2395 unsigned binding_table_index,
2396 unsigned msg_length,
2397 unsigned response_length,
2398 bool per_sample)
2399 {
2400 const struct gen_device_info *devinfo = p->devinfo;
2401 assert(devinfo->gen >= 9);
2402 const unsigned msg_subtype =
2403 brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16 ? 0 : 1;
2404 brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);
2405
2406 brw_set_dest(p, insn, dst);
2407 brw_set_src0(p, insn, payload);
2408 brw_set_dp_read_message(p, insn, binding_table_index,
2409 per_sample << 5 | msg_subtype,
2410 GEN9_DATAPORT_RC_RENDER_TARGET_READ,
2411 GEN6_SFID_DATAPORT_RENDER_CACHE,
2412 msg_length, true /* header_present */,
2413 response_length);
2414 brw_inst_set_rt_slot_group(devinfo, insn,
2415 brw_inst_qtr_control(devinfo, p->current) / 2);
2416
2417 return insn;
2418 }
2419
2420 /**
2421 * Texture sample instruction.
2422 * Note: the msg_type plus msg_length values determine exactly what kind
2423 * of sampling operation is performed. See volume 4, page 161 of docs.
2424 */
2425 void brw_SAMPLE(struct brw_codegen *p,
2426 struct brw_reg dest,
2427 unsigned msg_reg_nr,
2428 struct brw_reg src0,
2429 unsigned binding_table_index,
2430 unsigned sampler,
2431 unsigned msg_type,
2432 unsigned response_length,
2433 unsigned msg_length,
2434 unsigned header_present,
2435 unsigned simd_mode,
2436 unsigned return_format)
2437 {
2438 const struct gen_device_info *devinfo = p->devinfo;
2439 brw_inst *insn;
2440
2441 if (msg_reg_nr != -1)
2442 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2443
2444 insn = next_insn(p, BRW_OPCODE_SEND);
2445 brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */
2446
2447 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2448 *
2449 * "Instruction compression is not allowed for this instruction (that
2450 * is, send). The hardware behavior is undefined if this instruction is
2451 * set as compressed. However, compress control can be set to "SecHalf"
2452 * to affect the EMask generation."
2453 *
2454 * No similar wording is found in later PRMs, but there are examples
2455 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2456 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2457 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2458 */
2459 brw_inst_set_compression(devinfo, insn, false);
2460
2461 if (devinfo->gen < 6)
2462 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2463
2464 brw_set_dest(p, insn, dest);
2465 brw_set_src0(p, insn, src0);
2466 brw_set_sampler_message(p, insn,
2467 binding_table_index,
2468 sampler,
2469 msg_type,
2470 response_length,
2471 msg_length,
2472 header_present,
2473 simd_mode,
2474 return_format);
2475 }
2476
2477 /* Adjust the message header's sampler state pointer to
2478 * select the correct group of 16 samplers.
2479 */
2480 void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
2481 struct brw_reg header,
2482 struct brw_reg sampler_index)
2483 {
2484 /* The "Sampler Index" field can only store values between 0 and 15.
2485 * However, we can add an offset to the "Sampler State Pointer"
2486 * field, effectively selecting a different set of 16 samplers.
2487 *
2488 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2489  * offset, and each sampler state is only 16 bytes, so we can't
2490  * use the offset exclusively; we have to use both fields.
2491 */
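   /* Worked example (the values follow from the code below): for sampler
    * index 20, the state pointer is advanced by 16 * (20 / 16) * 16 = 256
    * bytes, leaving 20 % 16 = 4 for the 4-bit "Sampler Index" field.
    */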
2492
2493 const struct gen_device_info *devinfo = p->devinfo;
2494
2495 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2496 const int sampler_state_size = 16; /* 16 bytes */
2497 uint32_t sampler = sampler_index.ud;
2498
2499 if (sampler >= 16) {
2500 assert(devinfo->is_haswell || devinfo->gen >= 8);
2501 brw_ADD(p,
2502 get_element_ud(header, 3),
2503 get_element_ud(brw_vec8_grf(0, 0), 3),
2504 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2505 }
2506 } else {
2507 /* Non-const sampler array indexing case */
2508 if (devinfo->gen < 8 && !devinfo->is_haswell) {
2509 return;
2510 }
2511
2512 struct brw_reg temp = get_element_ud(header, 3);
2513
2514 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2515 brw_SHL(p, temp, temp, brw_imm_ud(4));
2516 brw_ADD(p,
2517 get_element_ud(header, 3),
2518 get_element_ud(brw_vec8_grf(0, 0), 3),
2519 temp);
2520 }
2521 }
2522
2523 /* All these variables are pretty confusing - we might be better off
2524 * using bitmasks and macros for this, in the old style. Or perhaps
2525 * just having the caller instantiate the fields in dword3 itself.
2526 */
2527 void brw_urb_WRITE(struct brw_codegen *p,
2528 struct brw_reg dest,
2529 unsigned msg_reg_nr,
2530 struct brw_reg src0,
2531 enum brw_urb_write_flags flags,
2532 unsigned msg_length,
2533 unsigned response_length,
2534 unsigned offset,
2535 unsigned swizzle)
2536 {
2537 const struct gen_device_info *devinfo = p->devinfo;
2538 brw_inst *insn;
2539
2540 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2541
2542 if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2543 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2544 brw_push_insn_state(p);
2545 brw_set_default_access_mode(p, BRW_ALIGN_1);
2546 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2547 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2548 BRW_REGISTER_TYPE_UD),
2549 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2550 brw_imm_ud(0xff00));
2551 brw_pop_insn_state(p);
2552 }
2553
2554 insn = next_insn(p, BRW_OPCODE_SEND);
2555
2556 assert(msg_length < BRW_MAX_MRF(devinfo->gen));
2557
2558 brw_set_dest(p, insn, dest);
2559 brw_set_src0(p, insn, src0);
2560 brw_set_src1(p, insn, brw_imm_d(0));
2561
2562 if (devinfo->gen < 6)
2563 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2564
2565 brw_set_urb_message(p,
2566 insn,
2567 flags,
2568 msg_length,
2569 response_length,
2570 offset,
2571 swizzle);
2572 }
2573
2574 struct brw_inst *
2575 brw_send_indirect_message(struct brw_codegen *p,
2576 unsigned sfid,
2577 struct brw_reg dst,
2578 struct brw_reg payload,
2579 struct brw_reg desc)
2580 {
2581 const struct gen_device_info *devinfo = p->devinfo;
2582 struct brw_inst *send;
2583 int setup;
2584
2585 dst = retype(dst, BRW_REGISTER_TYPE_UW);
2586
2587 assert(desc.type == BRW_REGISTER_TYPE_UD);
2588
2589 /* We hold on to the setup instruction (the SEND in the direct case, the OR
2590 * in the indirect case) by its index in the instruction store. The
2591 * pointer returned by next_insn() may become invalid if emitting the SEND
2592 * in the indirect case reallocs the store.
2593 */
2594
2595 if (desc.file == BRW_IMMEDIATE_VALUE) {
2596 setup = p->nr_insn;
2597 send = next_insn(p, BRW_OPCODE_SEND);
2598 brw_set_src1(p, send, desc);
2599
2600 } else {
2601 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2602
2603 brw_push_insn_state(p);
2604 brw_set_default_access_mode(p, BRW_ALIGN_1);
2605 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2606 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2607
2608 /* Load the indirect descriptor to an address register using OR so the
2609 * caller can specify additional descriptor bits with the usual
2610 * brw_set_*_message() helper functions.
2611 */
2612 setup = p->nr_insn;
2613 brw_OR(p, addr, desc, brw_imm_ud(0));
2614
2615 brw_pop_insn_state(p);
2616
2617 send = next_insn(p, BRW_OPCODE_SEND);
2618 brw_set_src1(p, send, addr);
2619 }
2620
2621 if (dst.width < BRW_EXECUTE_8)
2622 brw_inst_set_exec_size(devinfo, send, dst.width);
2623
2624 brw_set_dest(p, send, dst);
2625 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
2626 brw_inst_set_sfid(devinfo, send, sfid);
2627
2628 return &p->store[setup];
2629 }
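/* Callers typically finish the descriptor on the returned instruction,
 * e.g. (a sketch mirroring brw_send_indirect_surface_message() below):
 *
 *    brw_inst *send = brw_send_indirect_message(p, sfid, dst, payload, desc);
 *    brw_inst_set_mlen(devinfo, send, mlen);
 *    brw_inst_set_rlen(devinfo, send, rlen);
 */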
2630
2631 static struct brw_inst *
2632 brw_send_indirect_surface_message(struct brw_codegen *p,
2633 unsigned sfid,
2634 struct brw_reg dst,
2635 struct brw_reg payload,
2636 struct brw_reg surface,
2637 unsigned message_len,
2638 unsigned response_len,
2639 bool header_present)
2640 {
2641 const struct gen_device_info *devinfo = p->devinfo;
2642 struct brw_inst *insn;
2643
2644 if (surface.file != BRW_IMMEDIATE_VALUE) {
2645 struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
2646
2647 brw_push_insn_state(p);
2648 brw_set_default_access_mode(p, BRW_ALIGN_1);
2649 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2650 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2651
2652 /* Mask out invalid bits from the surface index to avoid hangs e.g. when
2653 * some surface array is accessed out of bounds.
2654 */
2655 insn = brw_AND(p, addr,
2656 suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
2657 BRW_GET_SWZ(surface.swizzle, 0)),
2658 brw_imm_ud(0xff));
2659
2660 brw_pop_insn_state(p);
2661
2662 surface = addr;
2663 }
2664
2665 insn = brw_send_indirect_message(p, sfid, dst, payload, surface);
2666 brw_inst_set_mlen(devinfo, insn, message_len);
2667 brw_inst_set_rlen(devinfo, insn, response_len);
2668 brw_inst_set_header_present(devinfo, insn, header_present);
2669
2670 return insn;
2671 }
2672
2673 static bool
2674 while_jumps_before_offset(const struct gen_device_info *devinfo,
2675 brw_inst *insn, int while_offset, int start_offset)
2676 {
2677 int scale = 16 / brw_jump_scale(devinfo);
2678 int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
2679 : brw_inst_jip(devinfo, insn);
2680 assert(jip < 0);
2681 return while_offset + jip * scale <= start_offset;
2682 }
2683
2684
2685 static int
2686 brw_find_next_block_end(struct brw_codegen *p, int start_offset)
2687 {
2688 int offset;
2689 void *store = p->store;
2690 const struct gen_device_info *devinfo = p->devinfo;
2691
2692 int depth = 0;
2693
2694 for (offset = next_offset(devinfo, store, start_offset);
2695 offset < p->next_insn_offset;
2696 offset = next_offset(devinfo, store, offset)) {
2697 brw_inst *insn = store + offset;
2698
2699 switch (brw_inst_opcode(devinfo, insn)) {
2700 case BRW_OPCODE_IF:
2701 depth++;
2702 break;
2703 case BRW_OPCODE_ENDIF:
2704 if (depth == 0)
2705 return offset;
2706 depth--;
2707 break;
2708 case BRW_OPCODE_WHILE:
2709 /* If the while doesn't jump before our instruction, it's the end
2710 * of a sibling do...while loop. Ignore it.
2711 */
2712 if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
2713 continue;
2714 /* fallthrough */
2715 case BRW_OPCODE_ELSE:
2716 case BRW_OPCODE_HALT:
2717 if (depth == 0)
2718 return offset;
2719 }
2720 }
2721
2722 return 0;
2723 }
2724
2725 /* There is no DO instruction on gen6 and later, so to find the end of
2726  * the loop we have to see if the loop is jumping back before our start
2727 * instruction.
2728 */
2729 static int
2730 brw_find_loop_end(struct brw_codegen *p, int start_offset)
2731 {
2732 const struct gen_device_info *devinfo = p->devinfo;
2733 int offset;
2734 void *store = p->store;
2735
2736 assert(devinfo->gen >= 6);
2737
2738 /* Always start after the instruction (such as a WHILE) we're trying to fix
2739 * up.
2740 */
2741 for (offset = next_offset(devinfo, store, start_offset);
2742 offset < p->next_insn_offset;
2743 offset = next_offset(devinfo, store, offset)) {
2744 brw_inst *insn = store + offset;
2745
2746 if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
2747 if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
2748 return offset;
2749 }
2750 }
2751 assert(!"not reached");
2752 return start_offset;
2753 }
2754
2755 /* After program generation, go back and update the UIP and JIP of
2756 * BREAK, CONT, and HALT instructions to their correct locations.
2757 */
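/* brw_jump_scale() is 1 on gen4 (whole instructions), 2 on gen5-7 (64-bit
 * compaction chunks) and 16 on gen8+ (bytes), so scale = 16 / br converts
 * byte offsets within the store into jump units.
 */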
2758 void
2759 brw_set_uip_jip(struct brw_codegen *p, int start_offset)
2760 {
2761 const struct gen_device_info *devinfo = p->devinfo;
2762 int offset;
2763 int br = brw_jump_scale(devinfo);
2764 int scale = 16 / br;
2765 void *store = p->store;
2766
2767 if (devinfo->gen < 6)
2768 return;
2769
2770 for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
2771 brw_inst *insn = store + offset;
2772 assert(brw_inst_cmpt_control(devinfo, insn) == 0);
2773
2774 int block_end_offset = brw_find_next_block_end(p, offset);
2775 switch (brw_inst_opcode(devinfo, insn)) {
2776 case BRW_OPCODE_BREAK:
2777 assert(block_end_offset != 0);
2778 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2779 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2780 brw_inst_set_uip(devinfo, insn,
2781 (brw_find_loop_end(p, offset) - offset +
2782 (devinfo->gen == 6 ? 16 : 0)) / scale);
2783 break;
2784 case BRW_OPCODE_CONTINUE:
2785 assert(block_end_offset != 0);
2786 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2787 brw_inst_set_uip(devinfo, insn,
2788 (brw_find_loop_end(p, offset) - offset) / scale);
2789
2790 assert(brw_inst_uip(devinfo, insn) != 0);
2791 assert(brw_inst_jip(devinfo, insn) != 0);
2792 break;
2793
2794 case BRW_OPCODE_ENDIF: {
2795 int32_t jump = (block_end_offset == 0) ?
2796 1 * br : (block_end_offset - offset) / scale;
2797 if (devinfo->gen >= 7)
2798 brw_inst_set_jip(devinfo, insn, jump);
2799 else
2800 brw_inst_set_gen6_jump_count(devinfo, insn, jump);
2801 break;
2802 }
2803
2804 case BRW_OPCODE_HALT:
2805 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2806 *
2807 * "In case of the halt instruction not inside any conditional
2808 * code block, the value of <JIP> and <UIP> should be the
2809 * same. In case of the halt instruction inside conditional code
2810 * block, the <UIP> should be the end of the program, and the
2811 * <JIP> should be end of the most inner conditional code block."
2812 *
2813 * The uip will have already been set by whoever set up the
2814 * instruction.
2815 */
2816 if (block_end_offset == 0) {
2817 brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
2818 } else {
2819 brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
2820 }
2821 assert(brw_inst_uip(devinfo, insn) != 0);
2822 assert(brw_inst_jip(devinfo, insn) != 0);
2823 break;
2824 }
2825 }
2826 }
2827
2828 void brw_ff_sync(struct brw_codegen *p,
2829 struct brw_reg dest,
2830 unsigned msg_reg_nr,
2831 struct brw_reg src0,
2832 bool allocate,
2833 unsigned response_length,
2834 bool eot)
2835 {
2836 const struct gen_device_info *devinfo = p->devinfo;
2837 brw_inst *insn;
2838
2839 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2840
2841 insn = next_insn(p, BRW_OPCODE_SEND);
2842 brw_set_dest(p, insn, dest);
2843 brw_set_src0(p, insn, src0);
2844 brw_set_src1(p, insn, brw_imm_d(0));
2845
2846 if (devinfo->gen < 6)
2847 brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);
2848
2849 brw_set_ff_sync_message(p,
2850 insn,
2851 allocate,
2852 response_length,
2853 eot);
2854 }
2855
2856 /**
2857 * Emit the SEND instruction necessary to generate stream output data on Gen6
2858 * (for transform feedback).
2859 *
2860 * If send_commit_msg is true, this is the last piece of stream output data
2861 * from this thread, so send the data as a committed write. According to the
2862 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2863 *
2864 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2865 * writes are complete by sending the final write as a committed write."
2866 */
2867 void
2868 brw_svb_write(struct brw_codegen *p,
2869 struct brw_reg dest,
2870 unsigned msg_reg_nr,
2871 struct brw_reg src0,
2872 unsigned binding_table_index,
2873 bool send_commit_msg)
2874 {
2875 const struct gen_device_info *devinfo = p->devinfo;
2876 const unsigned target_cache =
2877 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
2878 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
2879 BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
2880 brw_inst *insn;
2881
2882 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2883
2884 insn = next_insn(p, BRW_OPCODE_SEND);
2885 brw_set_dest(p, insn, dest);
2886 brw_set_src0(p, insn, src0);
2887 brw_set_src1(p, insn, brw_imm_d(0));
2888 brw_set_dp_write_message(p, insn,
2889 binding_table_index,
2890 0, /* msg_control: ignored */
2891 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2892 target_cache,
2893 1, /* msg_length */
2894 true, /* header_present */
2895 0, /* last_render_target: ignored */
2896 send_commit_msg, /* response_length */
2897 0, /* end_of_thread */
2898 send_commit_msg); /* send_commit_msg */
2899 }
2900
2901 static unsigned
2902 brw_surface_payload_size(struct brw_codegen *p,
2903 unsigned num_channels,
2904 bool has_simd4x2,
2905 bool has_simd16)
2906 {
2907 if (has_simd4x2 &&
2908 brw_inst_access_mode(p->devinfo, p->current) == BRW_ALIGN_16)
2909 return 1;
2910 else if (has_simd16 &&
2911 brw_inst_exec_size(p->devinfo, p->current) == BRW_EXECUTE_16)
2912 return 2 * num_channels;
2913 else
2914 return num_channels;
2915 }
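/* E.g., an Align1 SIMD16 access with 4 channels needs 2 * 4 = 8 response
 * registers, while an Align16 SIMD4x2 access packs its result into one.
 */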
2916
2917 static void
2918 brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
2919 brw_inst *insn,
2920 unsigned atomic_op,
2921 bool response_expected)
2922 {
2923 const struct gen_device_info *devinfo = p->devinfo;
2924 unsigned msg_control =
2925 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2926 (response_expected ? 1 << 5 : 0); /* Return data expected */
2927
2928 if (devinfo->gen >= 8 || devinfo->is_haswell) {
2929 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2930 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2931 msg_control |= 1 << 4; /* SIMD8 mode */
2932
2933 brw_inst_set_dp_msg_type(devinfo, insn,
2934 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2935 } else {
2936 brw_inst_set_dp_msg_type(devinfo, insn,
2937 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2938 }
2939 } else {
2940 brw_inst_set_dp_msg_type(devinfo, insn,
2941 GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2942
2943 if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
2944 msg_control |= 1 << 4; /* SIMD8 mode */
2945 }
2946
2947 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
2948 }
2949
2950 void
2951 brw_untyped_atomic(struct brw_codegen *p,
2952 struct brw_reg dst,
2953 struct brw_reg payload,
2954 struct brw_reg surface,
2955 unsigned atomic_op,
2956 unsigned msg_length,
2957 bool response_expected)
2958 {
2959 const struct gen_device_info *devinfo = p->devinfo;
2960 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
2961 HSW_SFID_DATAPORT_DATA_CACHE_1 :
2962 GEN7_SFID_DATAPORT_DATA_CACHE);
2963 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
2964 /* Mask out unused components -- This is especially important in Align16
2965 * mode on generations that don't have native support for SIMD4x2 atomics,
2966 * because unused but enabled components will cause the dataport to perform
2967 * additional atomic operations on the addresses that happen to be in the
2968 * uninitialized Y, Z and W coordinates of the payload.
2969 */
2970 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
2971 struct brw_inst *insn = brw_send_indirect_surface_message(
2972 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
2973 brw_surface_payload_size(p, response_expected,
2974 devinfo->gen >= 8 || devinfo->is_haswell, true),
2975 align1);
2976
2977 brw_set_dp_untyped_atomic_message(
2978 p, insn, atomic_op, response_expected);
2979 }
2980
2981 static void
2982 brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
2983 struct brw_inst *insn,
2984 unsigned num_channels)
2985 {
2986 const struct gen_device_info *devinfo = p->devinfo;
2987 /* Set mask of 32-bit channels to drop. */
2988 unsigned msg_control = 0xf & (0xf << num_channels);
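   /* E.g., num_channels == 2 keeps X/Y and drops Z/W: msg_control = 0xc. */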
2989
2990 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
2991 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
2992 msg_control |= 1 << 4; /* SIMD16 mode */
2993 else
2994 msg_control |= 2 << 4; /* SIMD8 mode */
2995 }
2996
2997 brw_inst_set_dp_msg_type(devinfo, insn,
2998 (devinfo->gen >= 8 || devinfo->is_haswell ?
2999 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
3000 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
3001 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3002 }
3003
3004 void
3005 brw_untyped_surface_read(struct brw_codegen *p,
3006 struct brw_reg dst,
3007 struct brw_reg payload,
3008 struct brw_reg surface,
3009 unsigned msg_length,
3010 unsigned num_channels)
3011 {
3012 const struct gen_device_info *devinfo = p->devinfo;
3013 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3014 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3015 GEN7_SFID_DATAPORT_DATA_CACHE);
3016 struct brw_inst *insn = brw_send_indirect_surface_message(
3017 p, sfid, dst, payload, surface, msg_length,
3018 brw_surface_payload_size(p, num_channels, true, true),
3019 false);
3020
3021 brw_set_dp_untyped_surface_read_message(
3022 p, insn, num_channels);
3023 }
3024
3025 static void
3026 brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
3027 struct brw_inst *insn,
3028 unsigned num_channels)
3029 {
3030 const struct gen_device_info *devinfo = p->devinfo;
3031 /* Set mask of 32-bit channels to drop. */
3032 unsigned msg_control = 0xf & (0xf << num_channels);
3033
3034 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3035 if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
3036 msg_control |= 1 << 4; /* SIMD16 mode */
3037 else
3038 msg_control |= 2 << 4; /* SIMD8 mode */
3039 } else {
3040 if (devinfo->gen >= 8 || devinfo->is_haswell)
3041 msg_control |= 0 << 4; /* SIMD4x2 mode */
3042 else
3043 msg_control |= 2 << 4; /* SIMD8 mode */
3044 }
3045
3046 brw_inst_set_dp_msg_type(devinfo, insn,
3047 devinfo->gen >= 8 || devinfo->is_haswell ?
3048 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
3049 GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
3050 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3051 }
3052
3053 void
3054 brw_untyped_surface_write(struct brw_codegen *p,
3055 struct brw_reg payload,
3056 struct brw_reg surface,
3057 unsigned msg_length,
3058 unsigned num_channels)
3059 {
3060 const struct gen_device_info *devinfo = p->devinfo;
3061 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3062 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3063 GEN7_SFID_DATAPORT_DATA_CACHE);
3064 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
3065 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3066 const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3067 WRITEMASK_X : WRITEMASK_XYZW;
3068 struct brw_inst *insn = brw_send_indirect_surface_message(
3069 p, sfid, brw_writemask(brw_null_reg(), mask),
3070 payload, surface, msg_length, 0, align1);
3071
3072 brw_set_dp_untyped_surface_write_message(
3073 p, insn, num_channels);
3074 }
3075
3076 static void
3077 brw_set_dp_typed_atomic_message(struct brw_codegen *p,
3078 struct brw_inst *insn,
3079 unsigned atomic_op,
3080 bool response_expected)
3081 {
3082 const struct gen_device_info *devinfo = p->devinfo;
3083 unsigned msg_control =
3084 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
3085 (response_expected ? 1 << 5 : 0); /* Return data expected */
3086
3087 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3088 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3089 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3090 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3091
3092 brw_inst_set_dp_msg_type(devinfo, insn,
3093 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
3094 } else {
3095 brw_inst_set_dp_msg_type(devinfo, insn,
3096 HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
3097 }
3098
3099 } else {
3100 brw_inst_set_dp_msg_type(devinfo, insn,
3101 GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);
3102
3103 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3104 msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
3105 }
3106
3107 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3108 }
3109
3110 void
3111 brw_typed_atomic(struct brw_codegen *p,
3112 struct brw_reg dst,
3113 struct brw_reg payload,
3114 struct brw_reg surface,
3115 unsigned atomic_op,
3116 unsigned msg_length,
3117 bool response_expected) {
3118 const struct gen_device_info *devinfo = p->devinfo;
3119 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3120 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3121 GEN6_SFID_DATAPORT_RENDER_CACHE);
3122 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3123 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3124 const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
3125 struct brw_inst *insn = brw_send_indirect_surface_message(
3126 p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
3127 brw_surface_payload_size(p, response_expected,
3128 devinfo->gen >= 8 || devinfo->is_haswell, false),
3129 true);
3130
3131 brw_set_dp_typed_atomic_message(
3132 p, insn, atomic_op, response_expected);
3133 }
3134
3135 static void
3136 brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
3137 struct brw_inst *insn,
3138 unsigned num_channels)
3139 {
3140 const struct gen_device_info *devinfo = p->devinfo;
3141 /* Set mask of unused channels. */
3142 unsigned msg_control = 0xf & (0xf << num_channels);
3143
3144 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3145 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3146 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3147 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3148 else
3149 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3150 }
3151
3152 brw_inst_set_dp_msg_type(devinfo, insn,
3153 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
3154 } else {
3155 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3156 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3157 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3158 }
3159
3160 brw_inst_set_dp_msg_type(devinfo, insn,
3161 GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
3162 }
3163
3164 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3165 }
3166
3167 void
3168 brw_typed_surface_read(struct brw_codegen *p,
3169 struct brw_reg dst,
3170 struct brw_reg payload,
3171 struct brw_reg surface,
3172 unsigned msg_length,
3173 unsigned num_channels)
3174 {
3175 const struct gen_device_info *devinfo = p->devinfo;
3176 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3177 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3178 GEN6_SFID_DATAPORT_RENDER_CACHE);
3179 struct brw_inst *insn = brw_send_indirect_surface_message(
3180 p, sfid, dst, payload, surface, msg_length,
3181 brw_surface_payload_size(p, num_channels,
3182 devinfo->gen >= 8 || devinfo->is_haswell, false),
3183 true);
3184
3185 brw_set_dp_typed_surface_read_message(
3186 p, insn, num_channels);
3187 }
3188
3189 static void
3190 brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
3191 struct brw_inst *insn,
3192 unsigned num_channels)
3193 {
3194 const struct gen_device_info *devinfo = p->devinfo;
3195 /* Set mask of unused channels. */
3196 unsigned msg_control = 0xf & (0xf << num_channels);
3197
3198 if (devinfo->gen >= 8 || devinfo->is_haswell) {
3199 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3200 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3201 msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
3202 else
3203 msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
3204 }
3205
3206 brw_inst_set_dp_msg_type(devinfo, insn,
3207 HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);
3208
3209 } else {
3210 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3211 if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
3212 msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
3213 }
3214
3215 brw_inst_set_dp_msg_type(devinfo, insn,
3216 GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
3217 }
3218
3219 brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
3220 }
3221
3222 void
3223 brw_typed_surface_write(struct brw_codegen *p,
3224 struct brw_reg payload,
3225 struct brw_reg surface,
3226 unsigned msg_length,
3227 unsigned num_channels)
3228 {
3229 const struct gen_device_info *devinfo = p->devinfo;
3230 const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
3231 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3232 GEN6_SFID_DATAPORT_RENDER_CACHE);
3233 const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
3234 /* Mask out unused components -- See comment in brw_untyped_atomic(). */
3235 const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
3236 WRITEMASK_X : WRITEMASK_XYZW);
3237 struct brw_inst *insn = brw_send_indirect_surface_message(
3238 p, sfid, brw_writemask(brw_null_reg(), mask),
3239 payload, surface, msg_length, 0, true);
3240
3241 brw_set_dp_typed_surface_write_message(
3242 p, insn, num_channels);
3243 }
3244
3245 static void
3246 brw_set_memory_fence_message(struct brw_codegen *p,
3247 struct brw_inst *insn,
3248 enum brw_message_target sfid,
3249 bool commit_enable)
3250 {
3251 const struct gen_device_info *devinfo = p->devinfo;
3252
3253 brw_set_message_descriptor(p, insn, sfid,
3254 1 /* message length */,
3255 (commit_enable ? 1 : 0) /* response length */,
3256 true /* header present */,
3257 false);
3258
3259 switch (sfid) {
3260 case GEN6_SFID_DATAPORT_RENDER_CACHE:
3261 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
3262 break;
3263 case GEN7_SFID_DATAPORT_DATA_CACHE:
3264 brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
3265 break;
3266 default:
3267 unreachable("Not reached");
3268 }
3269
3270 if (commit_enable)
3271 brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
3272 }
3273
3274 void
3275 brw_memory_fence(struct brw_codegen *p,
3276 struct brw_reg dst)
3277 {
3278 const struct gen_device_info *devinfo = p->devinfo;
3279 const bool commit_enable = devinfo->gen == 7 && !devinfo->is_haswell;
3280 struct brw_inst *insn;
3281
3282 brw_push_insn_state(p);
3283 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3284 brw_set_default_exec_size(p, BRW_EXECUTE_1);
3285 dst = vec1(dst);
3286
3287 /* Set dst as destination for dependency tracking, the MEMORY_FENCE
3288 * message doesn't write anything back.
3289 */
3290 insn = next_insn(p, BRW_OPCODE_SEND);
3291 dst = retype(dst, BRW_REGISTER_TYPE_UW);
3292 brw_set_dest(p, insn, dst);
3293 brw_set_src0(p, insn, dst);
3294 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3295 commit_enable);
3296
3297 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3298 /* IVB does typed surface access through the render cache, so we need to
3299 * flush it too. Use a different register so both flushes can be
3300 * pipelined by the hardware.
3301 */
3302 insn = next_insn(p, BRW_OPCODE_SEND);
3303 brw_set_dest(p, insn, offset(dst, 1));
3304 brw_set_src0(p, insn, offset(dst, 1));
3305 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3306 commit_enable);
3307
3308 /* Now write the response of the second message into the response of the
3309 * first to trigger a pipeline stall -- This way future render and data
3310 * cache messages will be properly ordered with respect to past data and
3311 * render cache messages.
3312 */
3313 brw_MOV(p, dst, offset(dst, 1));
3314 }
3315
3316 brw_pop_insn_state(p);
3317 }

void
brw_pixel_interpolator_query(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             bool noperspective,
                             unsigned mode,
                             struct brw_reg data,
                             unsigned msg_length,
                             unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;
   const uint16_t exec_size = brw_inst_exec_size(devinfo, p->current);

   /* brw_send_indirect_message will automatically use a direct send message
    * if data is actually immediate.
    */
   insn = brw_send_indirect_message(p,
                                    GEN7_SFID_PIXEL_INTERPOLATOR,
                                    dest,
                                    mrf,
                                    vec1(data));
   brw_inst_set_mlen(devinfo, insn, msg_length);
   brw_inst_set_rlen(devinfo, insn, response_length);

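   /* The PI message's SIMD mode field is a single bit choosing SIMD8 vs.
    * SIMD16; note that BRW_EXECUTE_16 is the encoded execution-size value,
    * not the literal integer 16.
    */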
   brw_inst_set_pi_simd_mode(devinfo, insn, exec_size == BRW_EXECUTE_16);
   brw_inst_set_pi_slot_group(devinfo, insn, 0); /* zero unless 32/64px dispatch */
   brw_inst_set_pi_nopersp(devinfo, insn, noperspective);
   brw_inst_set_pi_message_type(devinfo, insn, mode);
}

void
brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
                      struct brw_reg mask)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
   const unsigned qtr_control = brw_inst_qtr_control(devinfo, p->current);
   brw_inst *inst;

   assert(devinfo->gen >= 7);
   assert(mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8) {
         /* Getting the first active channel index is easy on Gen8: Just
          * find the first bit set in the execution mask.  The register
          * exists on HSW already but it reads back as all ones when the
          * current instruction has execution masking disabled, so it's
          * kind of useless.
          */
         struct brw_reg exec_mask =
            retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);

         if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
            /* Unfortunately, ce0 does not take into account the thread
             * dispatch mask, which may be a problem in cases where it's not
             * tightly packed (i.e. it doesn't have the form '2^n - 1' for
             * some n).  Combine ce0 with the given dispatch (or vector)
             * mask to mask off those channels which were never dispatched
             * by the hardware.
             */
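            /* Each quarter-control step covers 8 channels, so shifting the
             * provided mask right by qtr_control * 8 lines its bit 0 up
             * with the first channel ce0 reports on for this quarter (our
             * reading of the quarter-control behavior described below).
             */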
            brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
            brw_AND(p, vec1(dst), exec_mask, vec1(dst));
            exec_mask = vec1(dst);
         }

         /* Quarter control has the effect of magically shifting the value
          * of ce0 so you'll get the first active channel relative to the
          * specified quarter control as result.
          */
         inst = brw_FBL(p, vec1(dst), exec_mask);
      } else {
         const struct brw_reg flag = brw_flag_reg(1, 0);

         brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));

         /* Run enough instructions returning zero with execution masking
          * and a conditional modifier enabled in order to get the full
          * execution mask in f1.0.  We could use a single 32-wide move
          * here if it weren't for the hardware bug that causes channel
          * enables to be applied incorrectly to the second half of 32-wide
          * instructions on Gen7.
          */
         const unsigned lower_size = MIN2(16, exec_size);
         for (unsigned i = 0; i < exec_size / lower_size; i++) {
            inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
                           brw_imm_uw(0));
            brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
            brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
            brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
            brw_inst_set_flag_reg_nr(devinfo, inst, 1);
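            /* cvt() maps the width values {0, 1, 2, 4, 8, 16, 32} to
             * {0 ... 6}, so cvt(n) - 1 is the hardware encoding of
             * execution size n.
             */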
            brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
         }

         /* Find the first bit set in the exec_size-wide portion of the
          * flag register that was updated by the last sequence of MOV
          * instructions.
          */
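         /* (Each byte of the flag register holds the enable bits of 8
          * channels, so the byte_offset() by qtr_control below selects the
          * bits belonging to this quarter -- our reading of the layout.)
          */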
         const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
         brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
      }
   } else {
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      if (devinfo->gen >= 8 &&
          mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
         /* In SIMD4x2 mode the first active channel index is just the
          * negation of the first bit of the mask register.  Note that ce0
          * doesn't take into account the dispatch mask, so the Gen7 path
          * should be used instead unless you have the guarantee that the
          * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
          * for some n).
          */
         inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
                        negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
                        brw_imm_ud(1));

      } else {
         /* Overwrite the destination first without and then with execution
          * masking in order to find out which of the channels is active.
          */
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                 brw_imm_ud(1));

         inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
                        brw_imm_ud(0));
         brw_pop_insn_state(p);
         brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
      }
   }

   brw_pop_insn_state(p);
}

void
brw_broadcast(struct brw_codegen *p,
              struct brw_reg dst,
              struct brw_reg src,
              struct brw_reg idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
   brw_inst *inst;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);

   assert(src.file == BRW_GENERAL_REGISTER_FILE &&
          src.address_mode == BRW_ADDRESS_DIRECT);

   if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
       idx.file == BRW_IMMEDIATE_VALUE) {
      /* Trivial, the source is already uniform or the index is a constant.
       * We will typically not get here if the optimizer is doing its job,
       * but asserting would be mean.
       */
      const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
      brw_MOV(p, dst,
              (align1 ? stride(suboffset(src, i), 0, 1, 0) :
               stride(suboffset(src, 4 * i), 0, 4, 1)));
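      /* The <0;1,0> region above replicates a single element across the
       * whole execution; the Align16 variant instead takes the whole
       * 4-component vector starting at channel i.
       */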
   } else {
      if (align1) {
         const struct brw_reg addr =
            retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
         const unsigned offset = src.nr * REG_SIZE + src.subnr;
         /* Limit in bytes of the signed indirect addressing immediate. */
         const unsigned limit = 512;

         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         brw_SHL(p, addr, vec1(idx),
                 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
                            src.hstride - 1));
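         /* E.g. for a 4-byte type with an hstride encoding of 1 (a stride
          * of one element) this shifts idx left by 2, i.e. computes
          * idx * 4 bytes.
          */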

         /* We can only address up to limit bytes using the indirect
          * addressing immediate, account for the difference if the source
          * register is above this limit.
          */
         if (offset >= limit)
            brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
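         /* E.g. with offset == 700 this adds 512 to the address register,
          * leaving 188 (== 700 % 512) for the immediate below.
          */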

         brw_pop_insn_state(p);

         /* Use indirect addressing to fetch the specified component. */
         brw_MOV(p, dst,
                 retype(brw_vec1_indirect(addr.subnr, offset % limit),
                        src.type));
      } else {
         /* In SIMD4x2 mode the index can be either zero or one; replicate
          * it to all bits of a flag register,
          */
         inst = brw_MOV(p,
                        brw_null_reg(),
                        stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
         brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);

         /* and use predicated SEL to pick the right channel. */
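         /* (src0 of the SEL is the second SIMD4x2 channel -- suboffset 4 --
          * and src1 the first; the predicate built above selects src0 when
          * the index is one.)
          */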
         inst = brw_SEL(p, dst,
                        stride(suboffset(src, 4), 4, 4, 1),
                        stride(src, 4, 4, 1));
         brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
         brw_inst_set_flag_reg_nr(devinfo, inst, 1);
      }
   }

   brw_pop_insn_state(p);
}

/**
 * This instruction is generated as a single-channel align1 instruction by
 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
 *
 * We can't use the typed atomic op in the FS because that has the execution
 * mask ANDed with the pixel mask, but we just want to write the one dword
 * for all the pixels.
 *
 * We don't use the SIMD4x2 atomic ops in the VS because we just want to
 * write one u32.  So we use the same untyped atomic write message as the
 * pixel shader.
 *
 * The untyped atomic operation requires a BUFFER surface type with RAW
 * format, and is only accessible through the legacy DATA_CACHE dataport
 * messages.
 */
void brw_shader_time_add(struct brw_codegen *p,
                         struct brw_reg payload,
                         uint32_t surf_index)
{
   const unsigned sfid = (p->devinfo->gen >= 8 || p->devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   assert(p->devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);

   /* We use brw_vec1_reg and unmasked because we want to increment the
    * given offset only once.
    */
   brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                                      BRW_ARF_NULL, 0));
   brw_set_src0(p, send, brw_vec1_reg(payload.file,
                                      payload.nr, 0));
   brw_set_src1(p, send, brw_imm_ud(0));
   brw_set_message_descriptor(p, send, sfid, 2, 0, false, false);
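   /* Descriptor: two registers of payload (the offset plus the value to
    * add, as we understand the payload layout), no response, no header;
    * response_expected is false below since the write-back of the atomic
    * ADD isn't needed.
    */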
   brw_inst_set_binding_table_index(p->devinfo, send, surf_index);
   brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);

   brw_pop_insn_state(p);
}


/**
 * Emit the SEND message for a barrier
 */
void
brw_barrier(struct brw_codegen *p, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *inst;

   assert(devinfo->gen >= 7);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   inst = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, inst, src);
   brw_set_src1(p, inst, brw_null_reg());

   brw_set_message_descriptor(p, inst, BRW_SFID_MESSAGE_GATEWAY,
                              1 /* msg_length */,
                              0 /* response_length */,
                              false /* header_present */,
                              false /* end_of_thread */);

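   /* The notify bit asks the gateway to bump the notification register
    * (n0) of every thread in the barrier group once all of them have
    * arrived; brw_WAIT() below blocks on that register (our reading of
    * the gateway barrier protocol).
    */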
   brw_inst_set_gateway_notify(devinfo, inst, 1);
   brw_inst_set_gateway_subfuncid(devinfo, inst,
                                  BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);

   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_pop_insn_state(p);
}


/**
 * Emit the wait instruction for a barrier
 */
void
brw_WAIT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   struct brw_reg src = brw_notification_reg();
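   /* WAIT sleeps until the notification register becomes non-zero and then
    * decrements it; using n0 as both destination and source follows the
    * instruction's required encoding as we understand it.
    */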

   insn = next_insn(p, BRW_OPCODE_WAIT);
   brw_set_dest(p, insn, src);
   brw_set_src0(p, insn, src);
   brw_set_src1(p, insn, brw_null_reg());

   brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}