/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_eu_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_codegen *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
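
/* A hedged usage sketch (not from the original source; register choices are
 * hypothetical): before emitting a SEND whose payload was built in a GRF,
 * resolve the implied move so the payload lands in the expected message
 * register.
 *
 *    struct brw_reg payload = brw_vec8_grf(2, 0);
 *    gen6_resolve_implied_move(p, &payload, 1);
 *    // payload now refers to m1; emit the SEND using it as <src>
 */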

static void
gen7_convert_mrf_to_grf(struct brw_codegen *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   const struct gen_device_info *devinfo = p->devinfo;
   if (devinfo->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
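
/* Illustrative note (an editorial assumption, not in the original source):
 * with GEN7_MRF_HACK_START defined as 112 in brw_eu_defines.h, a fake MRF
 * such as m3 is remapped to g115, landing inside the R112-R127 range the
 * PRM requires for EOT sends.
 */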

/**
 * Convert a brw_reg_type enumeration value into the hardware representation.
 *
 * The hardware encoding may depend on whether the value is an immediate.
 */
unsigned
brw_reg_type_to_hw_type(const struct gen_device_info *devinfo,
                        enum brw_reg_type type, enum brw_reg_file file)
{
   if (file == BRW_IMMEDIATE_VALUE) {
      static const int imm_hw_types[] = {
         [BRW_REGISTER_TYPE_UD] = BRW_HW_REG_TYPE_UD,
         [BRW_REGISTER_TYPE_D]  = BRW_HW_REG_TYPE_D,
         [BRW_REGISTER_TYPE_UW] = BRW_HW_REG_TYPE_UW,
         [BRW_REGISTER_TYPE_W]  = BRW_HW_REG_TYPE_W,
         [BRW_REGISTER_TYPE_F]  = BRW_HW_REG_TYPE_F,
         [BRW_REGISTER_TYPE_UB] = -1,
         [BRW_REGISTER_TYPE_B]  = -1,
         [BRW_REGISTER_TYPE_UV] = BRW_HW_REG_IMM_TYPE_UV,
         [BRW_REGISTER_TYPE_VF] = BRW_HW_REG_IMM_TYPE_VF,
         [BRW_REGISTER_TYPE_V]  = BRW_HW_REG_IMM_TYPE_V,
         [BRW_REGISTER_TYPE_DF] = GEN8_HW_REG_IMM_TYPE_DF,
         [BRW_REGISTER_TYPE_HF] = GEN8_HW_REG_IMM_TYPE_HF,
         [BRW_REGISTER_TYPE_UQ] = GEN8_HW_REG_TYPE_UQ,
         [BRW_REGISTER_TYPE_Q]  = GEN8_HW_REG_TYPE_Q,
      };
      assert(type < ARRAY_SIZE(imm_hw_types));
      assert(imm_hw_types[type] != -1);
      return imm_hw_types[type];
   } else {
      /* Non-immediate registers */
      static const int hw_types[] = {
         [BRW_REGISTER_TYPE_UD] = BRW_HW_REG_TYPE_UD,
         [BRW_REGISTER_TYPE_D]  = BRW_HW_REG_TYPE_D,
         [BRW_REGISTER_TYPE_UW] = BRW_HW_REG_TYPE_UW,
         [BRW_REGISTER_TYPE_W]  = BRW_HW_REG_TYPE_W,
         [BRW_REGISTER_TYPE_UB] = BRW_HW_REG_NON_IMM_TYPE_UB,
         [BRW_REGISTER_TYPE_B]  = BRW_HW_REG_NON_IMM_TYPE_B,
         [BRW_REGISTER_TYPE_F]  = BRW_HW_REG_TYPE_F,
         [BRW_REGISTER_TYPE_UV] = -1,
         [BRW_REGISTER_TYPE_VF] = -1,
         [BRW_REGISTER_TYPE_V]  = -1,
         [BRW_REGISTER_TYPE_DF] = GEN7_HW_REG_NON_IMM_TYPE_DF,
         [BRW_REGISTER_TYPE_HF] = GEN8_HW_REG_NON_IMM_TYPE_HF,
         [BRW_REGISTER_TYPE_UQ] = GEN8_HW_REG_TYPE_UQ,
         [BRW_REGISTER_TYPE_Q]  = GEN8_HW_REG_TYPE_Q,
      };
      assert(type < ARRAY_SIZE(hw_types));
      assert(hw_types[type] != -1);
      return hw_types[type];
   }
}
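
/* A minimal usage sketch (illustrative): the same logical type can encode
 * differently depending on the register file, and some types only exist in
 * one file.  For example, vector-float is immediate-only:
 *
 *    unsigned hw = brw_reg_type_to_hw_type(devinfo, BRW_REGISTER_TYPE_VF,
 *                                          BRW_IMMEDIATE_VALUE);
 *    // hw == BRW_HW_REG_IMM_TYPE_VF; the same call with a GRF file would
 *    // hit the assert, since VF has no non-immediate encoding (-1 above).
 */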

/**
 * Return the element size given a hardware register type and file.
 *
 * The hardware encoding may depend on whether the value is an immediate.
 */
unsigned
brw_hw_reg_type_to_size(const struct gen_device_info *devinfo,
                        unsigned type, enum brw_reg_file file)
{
   if (file == BRW_IMMEDIATE_VALUE) {
      static const unsigned imm_hw_sizes[] = {
         [BRW_HW_REG_TYPE_UD]      = 4,
         [BRW_HW_REG_TYPE_D]       = 4,
         [BRW_HW_REG_TYPE_UW]      = 2,
         [BRW_HW_REG_TYPE_W]       = 2,
         [BRW_HW_REG_IMM_TYPE_UV]  = 2,
         [BRW_HW_REG_IMM_TYPE_VF]  = 4,
         [BRW_HW_REG_IMM_TYPE_V]   = 2,
         [BRW_HW_REG_TYPE_F]       = 4,
         [GEN8_HW_REG_TYPE_UQ]     = 8,
         [GEN8_HW_REG_TYPE_Q]      = 8,
         [GEN8_HW_REG_IMM_TYPE_DF] = 8,
         [GEN8_HW_REG_IMM_TYPE_HF] = 2,
      };
      assert(type < ARRAY_SIZE(imm_hw_sizes));
      assert(devinfo->gen >= 6 || type != BRW_HW_REG_IMM_TYPE_UV);
      assert(devinfo->gen >= 8 || type <= BRW_HW_REG_TYPE_F);
      return imm_hw_sizes[type];
   } else {
      /* Non-immediate registers */
      static const unsigned hw_sizes[] = {
         [BRW_HW_REG_TYPE_UD]          = 4,
         [BRW_HW_REG_TYPE_D]           = 4,
         [BRW_HW_REG_TYPE_UW]          = 2,
         [BRW_HW_REG_TYPE_W]           = 2,
         [BRW_HW_REG_NON_IMM_TYPE_UB]  = 1,
         [BRW_HW_REG_NON_IMM_TYPE_B]   = 1,
         [GEN7_HW_REG_NON_IMM_TYPE_DF] = 8,
         [BRW_HW_REG_TYPE_F]           = 4,
         [GEN8_HW_REG_TYPE_UQ]         = 8,
         [GEN8_HW_REG_TYPE_Q]          = 8,
         [GEN8_HW_REG_NON_IMM_TYPE_HF] = 2,
      };
      assert(type < ARRAY_SIZE(hw_sizes));
      return hw_sizes[type];
   }
}

void
brw_set_dest(struct brw_codegen *p, brw_inst *inst, struct brw_reg dest)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (dest.file == BRW_MESSAGE_REGISTER_FILE)
      assert((dest.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_reg_file(devinfo, inst, dest.file);
   brw_inst_set_dst_reg_type(devinfo, inst,
                             brw_reg_type_to_hw_type(devinfo, dest.type,
                                                     dest.file));
   brw_inst_set_dst_address_mode(devinfo, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(devinfo, inst, dest.nr);

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(devinfo, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(devinfo, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(devinfo, inst, dest.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          * Although Dst.HorzStride is a don't care for Align16, HW needs
          * this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(devinfo, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(devinfo, inst,
                                       dest.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(devinfo, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(devinfo, inst,
                                        dest.indirect_offset);
         /* even ignored in da16, still need to set as '01' */
         brw_inst_set_dst_hstride(devinfo, inst, 1);
      }
   }

   /* Generators should set a default exec_size of either 8 (SIMD4x2 or SIMD8)
    * or 16 (SIMD16), as that's normally correct.  However, when dealing with
    * small registers, we automatically reduce it to match the register size.
    *
    * In platforms that support fp64 we can emit instructions with a width of
    * 4 that need two SIMD8 registers and an exec_size of 8 or 16. In these
    * cases we need to make sure that these instructions have their exec sizes
    * set properly when they are emitted and we can't rely on this code to fix
    * it.
    */
   bool fix_exec_size;
   if (devinfo->gen >= 6)
      fix_exec_size = dest.width < BRW_EXECUTE_4;
   else
      fix_exec_size = dest.width < BRW_EXECUTE_8;

   if (fix_exec_size)
      brw_inst_set_exec_size(devinfo, inst, dest.width);
}

static void
validate_reg(const struct gen_device_info *devinfo,
             brw_inst *inst, struct brw_reg reg)
{
   const int hstride_for_reg[] = {0, 1, 2, 4};
   const int vstride_for_reg[] = {0, 1, 2, 4, 8, 16, 32};
   const int width_for_reg[] = {1, 2, 4, 8, 16};
   const int execsize_for_reg[] = {1, 2, 4, 8, 16, 32};
   int width, hstride, vstride, execsize;

   if (reg.file == BRW_IMMEDIATE_VALUE)
      return;

   if (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
       reg.nr == BRW_ARF_NULL)
      return;

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Swizzling is not allowed when an accumulator is used as an implicit
    *    source or an explicit source in an instruction."
    */
   if (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
       reg.nr == BRW_ARF_ACCUMULATOR)
      assert(reg.swizzle == BRW_SWIZZLE_XYZW);

   assert(reg.hstride < ARRAY_SIZE(hstride_for_reg));
   hstride = hstride_for_reg[reg.hstride];

   if (reg.vstride == 0xf) {
      vstride = -1;
   } else {
      assert(reg.vstride >= 0 && reg.vstride < ARRAY_SIZE(vstride_for_reg));
      vstride = vstride_for_reg[reg.vstride];
   }

   assert(reg.width >= 0 && reg.width < ARRAY_SIZE(width_for_reg));
   width = width_for_reg[reg.width];

   assert(brw_inst_exec_size(devinfo, inst) >= 0 &&
          brw_inst_exec_size(devinfo, inst) < ARRAY_SIZE(execsize_for_reg));
   execsize = execsize_for_reg[brw_inst_exec_size(devinfo, inst)];

   /* Restrictions from 3.3.10: Register Region Restrictions. */
   /* 3. */
   assert(execsize >= width);

   /* 4. */
   if (execsize == width && hstride != 0) {
      assert(vstride == -1 || vstride == width * hstride);
   }

   /* 5. */
   if (execsize == width && hstride == 0) {
      /* no restriction on vstride. */
   }

   /* 6. */
   if (width == 1) {
      assert(hstride == 0);
   }

   /* 7. */
   if (execsize == 1 && width == 1) {
      assert(hstride == 0);
      assert(vstride == 0);
   }

   /* 8. */
   if (vstride == 0 && hstride == 0) {
      assert(width == 1);
   }

   /* 10. Check destination issues. */
}
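
/* A worked example of the region rules above (illustrative, not from the
 * original source): a typical SIMD8 float source <8;8,1>:F decodes to
 * vstride 8, width 8, hstride 1.  With execsize 8, rule 3 holds (8 >= 8)
 * and rule 4 holds (vstride 8 == width 8 * hstride 1).  A scalar <0;1,0>
 * source instead satisfies rules 6-8 (width 1, both strides 0).
 */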

void
brw_set_src0(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file == BRW_MESSAGE_REGISTER_FILE)
      assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->gen));
   else if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (devinfo->gen >= 6 && (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SEND ||
                             brw_inst_opcode(devinfo, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   validate_reg(devinfo, inst, reg);

   brw_inst_set_src0_reg_file(devinfo, inst, reg.file);
   brw_inst_set_src0_reg_type(devinfo, inst,
                              brw_reg_type_to_hw_type(devinfo, reg.type, reg.file));
   brw_inst_set_src0_abs(devinfo, inst, reg.abs);
   brw_inst_set_src0_negate(devinfo, inst, reg.negate);
   brw_inst_set_src0_address_mode(devinfo, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      if (reg.type == BRW_REGISTER_TYPE_DF ||
          brw_inst_opcode(devinfo, inst) == BRW_OPCODE_DIM)
         brw_inst_set_imm_df(devinfo, inst, reg.df);
      else if (reg.type == BRW_REGISTER_TYPE_UQ ||
               reg.type == BRW_REGISTER_TYPE_Q)
         brw_inst_set_imm_uq(devinfo, inst, reg.u64);
      else
         brw_inst_set_imm_ud(devinfo, inst, reg.ud);

      if (type_sz(reg.type) < 8) {
         brw_inst_set_src1_reg_file(devinfo, inst,
                                    BRW_ARCHITECTURE_REGISTER_FILE);
         brw_inst_set_src1_reg_type(devinfo, inst,
                                    brw_inst_src0_reg_type(devinfo, inst));
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(devinfo, inst, reg.nr);
         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(devinfo, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(devinfo, inst, reg.subnr);

         if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(devinfo, inst, reg.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(devinfo, inst, reg.indirect_offset);
         }
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src0_width(devinfo, inst, reg.width);
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src0_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src0_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}
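
/* A hedged sketch of the scalar-source special case above (hypothetical
 * registers, assuming the brw_vec1_grf() helper from brw_reg.h): emitting a
 * SIMD1 MOV forces the source region to <0;1,0>, so every enabled channel
 * reads the same element.
 *
 *    brw_push_insn_state(p);
 *    brw_set_default_exec_size(p, BRW_EXECUTE_1);
 *    brw_MOV(p, brw_vec1_grf(5, 0), brw_vec1_grf(4, 0));
 *    brw_pop_insn_state(p);
 */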

void
brw_set_src1(struct brw_codegen *p, brw_inst *inst, struct brw_reg reg)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   /* From the IVB PRM Vol. 4, Pt. 3, Section 3.3.3.5:
    *
    *    "Accumulator registers may be accessed explicitly as src0
    *    operands only."
    */
   assert(reg.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          reg.nr != BRW_ARF_ACCUMULATOR);

   gen7_convert_mrf_to_grf(p, &reg);
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   validate_reg(devinfo, inst, reg);

   brw_inst_set_src1_reg_file(devinfo, inst, reg.file);
   brw_inst_set_src1_reg_type(devinfo, inst,
                              brw_reg_type_to_hw_type(devinfo, reg.type, reg.file));
   brw_inst_set_src1_abs(devinfo, inst, reg.abs);
   brw_inst_set_src1_negate(devinfo, inst, reg.negate);

   /* Only src1 can be immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(devinfo, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* two-argument instructions can only use 32-bit immediates */
      assert(type_sz(reg.type) < 8);
      brw_inst_set_imm_ud(devinfo, inst, reg.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(devinfo, inst, reg.nr);
      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(devinfo, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(devinfo, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(devinfo, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(devinfo, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(devinfo, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(devinfo, inst, reg.hstride);
            brw_inst_set_src1_width(devinfo, inst, reg.width);
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(devinfo, inst,
                                       BRW_GET_SWZ(reg.swizzle, BRW_CHANNEL_W));

         if (reg.vstride == BRW_VERTICAL_STRIDE_8) {
            /* This is an oddity of the fact we're using the same
             * descriptions for registers in align_16 as align_1:
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
                    reg.type == BRW_REGISTER_TYPE_DF &&
                    reg.vstride == BRW_VERTICAL_STRIDE_2) {
            /* From SNB PRM:
             *
             * "For Align16 access mode, only encodings of 0000 and 0011
             *  are allowed. Other codes are reserved."
             *
             * Presumably the DevSNB behavior applies to IVB as well.
             */
            brw_inst_set_src1_vstride(devinfo, inst, BRW_VERTICAL_STRIDE_4);
         } else {
            brw_inst_set_src1_vstride(devinfo, inst, reg.vstride);
         }
      }
   }
}

/**
 * Set the Message Descriptor and Extended Message Descriptor fields
 * for SEND messages.
 *
 * \note This zeroes out the Function Control bits, so it must be called
 *       \b before filling out any message-specific data.  Callers can
 *       choose not to fill in irrelevant bits; they will be zero.
 */
void
brw_set_message_descriptor(struct brw_codegen *p,
                           brw_inst *inst,
                           enum brw_message_target sfid,
                           unsigned msg_length,
                           unsigned response_length,
                           bool header_present,
                           bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_src1(p, inst, brw_imm_d(0));

   /* For indirect sends, `inst` will not be the SEND/SENDC instruction
    * itself; instead, it will be a MOV/OR into the address register.
    *
    * In this case, we avoid setting the extended message descriptor bits,
    * since they go on the later SEND/SENDC instead and if set here would
    * instead clobber the conditionalmod bits.
    */
   unsigned opcode = brw_inst_opcode(devinfo, inst);
   if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC) {
      brw_inst_set_sfid(devinfo, inst, sfid);
   }

   brw_inst_set_mlen(devinfo, inst, msg_length);
   brw_inst_set_rlen(devinfo, inst, response_length);
   brw_inst_set_eot(devinfo, inst, end_of_thread);

   if (devinfo->gen >= 5) {
      brw_inst_set_header_present(devinfo, inst, header_present);
   }
}
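
/* A hedged usage sketch (registers and descriptor values are hypothetical):
 * the descriptor must be set before any message-specific bits, because it
 * clears the Function Control field via brw_set_src1(..., brw_imm_d(0)).
 *
 *    brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
 *    brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
 *    brw_set_src0(p, send, payload);
 *    brw_set_message_descriptor(p, send, BRW_SFID_URB,
 *                               1, 0, true, false);  // mlen, rlen, header, eot
 *    // ...now fill in the URB-specific descriptor bits...
 */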

static void brw_set_math_message( struct brw_codegen *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }


   brw_set_message_descriptor(p, inst, BRW_SFID_MATH,
                              msg_length, response_length, false, false);
   brw_inst_set_math_msg_function(devinfo, inst, function);
   brw_inst_set_math_msg_signed_int(devinfo, inst, integer_type);
   brw_inst_set_math_msg_precision(devinfo, inst, low_precision);
   brw_inst_set_math_msg_saturate(devinfo, inst, brw_inst_saturate(devinfo, inst));
   brw_inst_set_math_msg_data_type(devinfo, inst, dataType);
   brw_inst_set_saturate(devinfo, inst, 0);
}


static void brw_set_ff_sync_message(struct brw_codegen *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              1, response_length, true, end_of_thread);
   brw_inst_set_urb_opcode(devinfo, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(devinfo, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(devinfo, insn, 0);
   brw_inst_set_urb_swizzle_control(devinfo, insn, 0);
   brw_inst_set_urb_used(devinfo, insn, 0);
   brw_inst_set_urb_complete(devinfo, insn, 0);
}

static void brw_set_urb_message( struct brw_codegen *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   const struct gen_device_info *devinfo = p->devinfo;

   assert(devinfo->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(devinfo->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(devinfo->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              msg_length, response_length, true,
                              flags & BRW_URB_WRITE_EOT);

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(devinfo, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(devinfo, insn, offset);
   brw_inst_set_urb_swizzle_control(devinfo, insn, swizzle_control);

   if (devinfo->gen < 8) {
      brw_inst_set_urb_complete(devinfo, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (devinfo->gen < 7) {
      brw_inst_set_urb_allocate(devinfo, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(devinfo, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

void
brw_set_dp_write_message(struct brw_codegen *p,
                         brw_inst *insn,
                         unsigned binding_table_index,
                         unsigned msg_control,
                         unsigned msg_type,
                         unsigned target_cache,
                         unsigned msg_length,
                         bool header_present,
                         unsigned last_render_target,
                         unsigned response_length,
                         unsigned end_of_thread,
                         unsigned send_commit_msg)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_WRITE);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, end_of_thread);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_write_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_write_msg_control(devinfo, insn, msg_control);
   brw_inst_set_rt_last(devinfo, insn, last_render_target);
   if (devinfo->gen < 7) {
      brw_inst_set_dp_write_commit(devinfo, insn, send_commit_msg);
   }
}

void
brw_set_dp_read_message(struct brw_codegen *p,
                        brw_inst *insn,
                        unsigned binding_table_index,
                        unsigned msg_control,
                        unsigned msg_type,
                        unsigned target_cache,
                        unsigned msg_length,
                        bool header_present,
                        unsigned response_length)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 6 ? target_cache :
                          BRW_SFID_DATAPORT_READ);

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, false);

   brw_inst_set_binding_table_index(devinfo, insn, binding_table_index);
   brw_inst_set_dp_read_msg_type(devinfo, insn, msg_type);
   brw_inst_set_dp_read_msg_control(devinfo, insn, msg_control);
   if (devinfo->gen < 6)
      brw_inst_set_dp_read_target_cache(devinfo, insn, target_cache);
}

void
brw_set_sampler_message(struct brw_codegen *p,
                        brw_inst *inst,
                        unsigned binding_table_index,
                        unsigned sampler,
                        unsigned msg_type,
                        unsigned response_length,
                        unsigned msg_length,
                        unsigned header_present,
                        unsigned simd_mode,
                        unsigned return_format)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER, msg_length,
                              response_length, header_present, false);

   brw_inst_set_binding_table_index(devinfo, inst, binding_table_index);
   brw_inst_set_sampler(devinfo, inst, sampler);
   brw_inst_set_sampler_msg_type(devinfo, inst, msg_type);
   if (devinfo->gen >= 5) {
      brw_inst_set_sampler_simd_mode(devinfo, inst, simd_mode);
   } else if (devinfo->gen == 4 && !devinfo->is_g4x) {
      brw_inst_set_sampler_return_format(devinfo, inst, return_format);
   }
}

static void
gen7_set_dp_scratch_message(struct brw_codegen *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (devinfo->gen >= 8 && num_regs == 8));
   const unsigned block_size = (devinfo->gen >= 8 ? _mesa_logbase2(num_regs) :
                                num_regs - 1);

   brw_set_message_descriptor(p, inst, GEN7_SFID_DATAPORT_DATA_CACHE,
                              mlen, rlen, header_present, false);
   brw_inst_set_dp_category(devinfo, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(devinfo, inst, write);
   brw_inst_set_scratch_type(devinfo, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(devinfo, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(devinfo, inst, block_size);
   brw_inst_set_scratch_addr_offset(devinfo, inst, addr_offset);
}
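
/* Worked example of the block-size encoding above (illustrative): on gen7,
 * num_regs 4 encodes as num_regs - 1 = 3, while on gen8+ the field holds
 * log2(num_regs), so num_regs 4 encodes as 2 and the new num_regs 8 case
 * encodes as 3.
 */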

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_codegen *p, unsigned opcode)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];
   memcpy(insn, p->current, sizeof(*insn));

   brw_inst_set_opcode(devinfo, insn, opcode);
   return insn;
}
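
/* Editorial note: because next_insn() doubles and reallocates p->store, a
 * brw_inst pointer taken before a later next_insn() call may be left
 * dangling.  That is why the if/else helpers below record stack entries as
 * indices (inst - p->store) rather than raw pointers, and why brw_ENDIF
 * calls next_insn() before dereferencing any stored index.
 */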

static brw_inst *
brw_alu1(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_codegen *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   /* 64-bit immediates are only supported on 1-src instructions */
   assert(src0.file != BRW_IMMEDIATE_VALUE || type_sz(src0.type) <= 4);
   assert(src1.file != BRW_IMMEDIATE_VALUE || type_sz(src1.type) <= 4);

   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   /* Normally, SubRegNum is in bytes (0..31).  However, 3-src instructions
    * use 32-bit units (components 0..7).  Since they only support F/D/UD
    * types, this doesn't lose any flexibility, but uses fewer bits.
    */
   return reg.subnr / 4;
}
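
/* Quick example (illustrative): a float source at byte subnr 12 becomes
 * component 12 / 4 = 3 in the 3-src encoding, i.e. the fourth 32-bit
 * element of the register.
 */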

static brw_inst *
brw_alu3(struct brw_codegen *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(brw_inst_access_mode(devinfo, inst) == BRW_ALIGN_16);

   assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
          dest.file == BRW_MESSAGE_REGISTER_FILE);
   assert(dest.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(dest.type == BRW_REGISTER_TYPE_F ||
          dest.type == BRW_REGISTER_TYPE_DF ||
          dest.type == BRW_REGISTER_TYPE_D ||
          dest.type == BRW_REGISTER_TYPE_UD);
   if (devinfo->gen == 6) {
      brw_inst_set_3src_dst_reg_file(devinfo, inst,
                                     dest.file == BRW_MESSAGE_REGISTER_FILE);
   }
   brw_inst_set_3src_dst_reg_nr(devinfo, inst, dest.nr);
   brw_inst_set_3src_dst_subreg_nr(devinfo, inst, dest.subnr / 16);
   brw_inst_set_3src_dst_writemask(devinfo, inst, dest.writemask);

   assert(src0.file == BRW_GENERAL_REGISTER_FILE);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.nr < 128);
   brw_inst_set_3src_src0_swizzle(devinfo, inst, src0.swizzle);
   brw_inst_set_3src_src0_subreg_nr(devinfo, inst, get_3src_subreg_nr(src0));
   brw_inst_set_3src_src0_reg_nr(devinfo, inst, src0.nr);
   brw_inst_set_3src_src0_abs(devinfo, inst, src0.abs);
   brw_inst_set_3src_src0_negate(devinfo, inst, src0.negate);
   brw_inst_set_3src_src0_rep_ctrl(devinfo, inst,
                                   src0.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src1.file == BRW_GENERAL_REGISTER_FILE);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.nr < 128);
   brw_inst_set_3src_src1_swizzle(devinfo, inst, src1.swizzle);
   brw_inst_set_3src_src1_subreg_nr(devinfo, inst, get_3src_subreg_nr(src1));
   brw_inst_set_3src_src1_reg_nr(devinfo, inst, src1.nr);
   brw_inst_set_3src_src1_abs(devinfo, inst, src1.abs);
   brw_inst_set_3src_src1_negate(devinfo, inst, src1.negate);
   brw_inst_set_3src_src1_rep_ctrl(devinfo, inst,
                                   src1.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src2.file == BRW_GENERAL_REGISTER_FILE);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.nr < 128);
   brw_inst_set_3src_src2_swizzle(devinfo, inst, src2.swizzle);
   brw_inst_set_3src_src2_subreg_nr(devinfo, inst, get_3src_subreg_nr(src2));
   brw_inst_set_3src_src2_reg_nr(devinfo, inst, src2.nr);
   brw_inst_set_3src_src2_abs(devinfo, inst, src2.abs);
   brw_inst_set_3src_src2_negate(devinfo, inst, src2.negate);
   brw_inst_set_3src_src2_rep_ctrl(devinfo, inst,
                                   src2.vstride == BRW_VERTICAL_STRIDE_0);

   if (devinfo->gen >= 7) {
      /* Set both the source and destination types based on dest.type,
       * ignoring the source register types.  The MAD and LRP emitters ensure
       * that all four types are float.  The BFE and BFI2 emitters, however,
       * may send us mixed D and UD types and want us to ignore that and use
       * the destination type.
       */
      switch (dest.type) {
      case BRW_REGISTER_TYPE_F:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_F);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_F);
         break;
      case BRW_REGISTER_TYPE_DF:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_DF);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_DF);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_D);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_D);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_inst_set_3src_src_type(devinfo, inst, BRW_3SRC_TYPE_UD);
         brw_inst_set_3src_dst_type(devinfo, inst, BRW_3SRC_TYPE_UD);
         break;
      default:
         unreachable("not reached");
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                          \
brw_inst *brw_##OP(struct brw_codegen *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0)                   \
{                                                         \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);       \
}

#define ALU2(OP)                                               \
brw_inst *brw_##OP(struct brw_codegen *p,                      \
                   struct brw_reg dest,                        \
                   struct brw_reg src0,                        \
                   struct brw_reg src1)                        \
{                                                              \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);      \
}

#define ALU3(OP)                                                     \
brw_inst *brw_##OP(struct brw_codegen *p,                            \
                   struct brw_reg dest,                              \
                   struct brw_reg src0,                              \
                   struct brw_reg src1,                              \
                   struct brw_reg src2)                              \
{                                                                    \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);      \
}

#define ALU3F(OP)                                                    \
brw_inst *brw_##OP(struct brw_codegen *p,                            \
                   struct brw_reg dest,                              \
                   struct brw_reg src0,                              \
                   struct brw_reg src1,                              \
                   struct brw_reg src2)                              \
{                                                                    \
   assert(dest.type == BRW_REGISTER_TYPE_F ||                        \
          dest.type == BRW_REGISTER_TYPE_DF);                        \
   if (dest.type == BRW_REGISTER_TYPE_F) {                           \
      assert(src0.type == BRW_REGISTER_TYPE_F);                      \
      assert(src1.type == BRW_REGISTER_TYPE_F);                      \
      assert(src2.type == BRW_REGISTER_TYPE_F);                      \
   } else if (dest.type == BRW_REGISTER_TYPE_DF) {                   \
      assert(src0.type == BRW_REGISTER_TYPE_DF);                     \
      assert(src1.type == BRW_REGISTER_TYPE_DF);                     \
      assert(src2.type == BRW_REGISTER_TYPE_DF);                     \
   }                                                                 \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2);      \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                     \
void brw_##OP(struct brw_codegen *p,                                  \
              struct brw_reg dest,                                    \
              struct brw_reg src)                                     \
{                                                                     \
   const struct gen_device_info *devinfo = p->devinfo;                \
   brw_inst *rnd, *add;                                               \
   rnd = next_insn(p, BRW_OPCODE_##OP);                               \
   brw_set_dest(p, rnd, dest);                                        \
   brw_set_src0(p, rnd, src);                                         \
                                                                      \
   if (devinfo->gen < 6) {                                            \
      /* turn on round-increments */                                  \
      brw_inst_set_cond_modifier(devinfo, rnd, BRW_CONDITIONAL_R);    \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));                  \
      brw_inst_set_pred_control(devinfo, add, BRW_PREDICATE_NORMAL);  \
   }                                                                  \
}
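
/* Worked example of the ROUND expansion above (illustrative pseudo-assembly):
 * on gen4/5, brw_RNDE(p, dst, src) emits
 *
 *    rnde.r  dst  src          // sets the per-channel "increment" flag
 *    (+f0)   add  dst dst 1.0F // predicated fix-up
 *
 * so channels whose first result rounded the wrong way get corrected, while
 * on gen6+ only the single RNDE instruction is emitted.
 */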

ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU1(DIM)
ALU2(ASR)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU3F(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)

brw_inst *
brw_MOV(struct brw_codegen *p, struct brw_reg dest, struct brw_reg src0)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* When converting F->DF on IVB/BYT, every odd source channel is ignored.
    * To avoid the problems that causes, we use a <1,2,0> source region to read
    * each element twice.
    */
   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1 &&
       dest.type == BRW_REGISTER_TYPE_DF &&
       (src0.type == BRW_REGISTER_TYPE_F ||
        src0.type == BRW_REGISTER_TYPE_D ||
        src0.type == BRW_REGISTER_TYPE_UD) &&
       !has_scalar_region(src0)) {
      assert(src0.vstride == BRW_VERTICAL_STRIDE_4 &&
             src0.width == BRW_WIDTH_4 &&
             src0.hstride == BRW_HORIZONTAL_STRIDE_1);

      src0.vstride = BRW_VERTICAL_STRIDE_1;
      src0.width = BRW_WIDTH_2;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   }

   return brw_alu1(p, BRW_OPCODE_MOV, dest, src0);
}
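
/* Example of the IVB/BYT F->DF workaround above (illustrative): a source
 * that would normally be read as <4;4,1> - elements f0 f1 f2 f3 - becomes
 * <1;2,0>, which reads f0 f0 f1 f1 ..., so each value is also present in
 * the odd channel that the hardware would otherwise ignore.
 */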

brw_inst *
brw_ADD(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_LINE(struct brw_codegen *p, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   return brw_alu2(p, BRW_OPCODE_LINE, dest, src0, src1);
}

brw_inst *
brw_PLN(struct brw_codegen *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   src0.vstride = BRW_VERTICAL_STRIDE_0;
   src0.width = BRW_WIDTH_1;
   src0.hstride = BRW_HORIZONTAL_STRIDE_0;
   src1.vstride = BRW_VERTICAL_STRIDE_8;
   src1.width = BRW_WIDTH_8;
   src1.hstride = BRW_HORIZONTAL_STRIDE_1;
   return brw_alu2(p, BRW_OPCODE_PLN, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;
   /* The F32TO16 instruction doesn't support 32-bit destination types in
    * Align1 mode, and neither does the Gen8 implementation in terms of a
    * converting MOV.  Gen7 does zero out the high 16 bits in Align16 mode as
    * an undocumented feature.
    */
   const bool needs_zero_fill = (dst.type == BRW_REGISTER_TYPE_UD &&
                                 (!align16 || devinfo->gen >= 8));
   brw_inst *inst;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_UD ||
             dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   brw_push_insn_state(p);

   if (needs_zero_fill) {
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      dst = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);
   }

   if (devinfo->gen >= 8) {
      inst = brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(devinfo->gen == 7);
      inst = brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }

   if (needs_zero_fill) {
      brw_inst_set_no_dd_clear(devinfo, inst, true);
      inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
      brw_inst_set_no_dd_check(devinfo, inst, true);
   }

   brw_pop_insn_state(p);
   return inst;
}

brw_inst *
brw_F16TO32(struct brw_codegen *p, struct brw_reg dst, struct brw_reg src)
{
   const struct gen_device_info *devinfo = p->devinfo;
   bool align16 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
       *
       *   Because this instruction does not have a 16-bit floating-point
       *   type, the source data type must be Word (W). The destination type
       *   must be F (Float).
       */
      if (src.type == BRW_REGISTER_TYPE_UD)
         src = spread(retype(src, BRW_REGISTER_TYPE_W), 2);

      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (devinfo->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(devinfo->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_codegen *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   memset(insn, 0, sizeof(*insn));
   brw_inst_set_opcode(p->devinfo, insn, BRW_OPCODE_NOP);
}




/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_codegen *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(devinfo, inst, BRW_EXECUTE_2);
   brw_inst_set_qtr_control(devinfo, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(devinfo, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_codegen *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_codegen *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_codegen *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_codegen *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, eg. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction: */
   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_exec_size(devinfo, insn, execute_size);
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_codegen *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(devinfo, insn,
                          brw_inst_exec_size(devinfo, p->current));
   brw_inst_set_gen6_jump_count(devinfo, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(devinfo, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(devinfo, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_codegen *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(devinfo, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(devinfo, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(devinfo, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(devinfo, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(devinfo, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(devinfo, if_inst, (next_inst - if_inst) * 16);
   }
}
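
/* A worked example of the IP arithmetic above (illustrative): each native
 * instruction is 16 bytes, so with an ELSE two instructions after the IF,
 * the IF becomes a predicate-inverted ADD of (2 + 1) * 16 = 48 bytes to IP,
 * jumping over the then-block and the ELSE-turned-ADD itself.
 */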

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_codegen *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (devinfo->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(devinfo, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(devinfo, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(devinfo);

   assert(brw_inst_opcode(devinfo, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(devinfo, endif_inst, brw_inst_exec_size(devinfo, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (devinfo->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(devinfo, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(devinfo, else_inst, brw_inst_exec_size(devinfo, if_inst));

      /* Patch IF -> ELSE */
      if (devinfo->gen < 6) {
         brw_inst_set_gen4_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(devinfo, if_inst, 0);
      } else if (devinfo->gen == 6) {
         brw_inst_set_gen6_jump_count(devinfo, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (devinfo->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(devinfo, else_inst, 1);
      } else if (devinfo->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(devinfo, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(devinfo, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(devinfo, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(devinfo, else_inst, br * (endif_inst - else_inst));
         if (devinfo->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(devinfo, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}

void
brw_ELSE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(devinfo, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(devinfo, insn, 0);
      brw_inst_set_uip(devinfo, insn, 0);
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP.  On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
   if (devinfo->gen < 6 && p->single_program_flow)
      emit_endif = false;
   /*
    * A single next_insn() may grow the instruction store and change its
    * base address (p->store), so call it first, before taking any
    * pointers into the store by index.
    */
   if (emit_endif)
      insn = next_insn(p, BRW_OPCODE_ENDIF);

   /* Pop the IF and (optional) ELSE instructions from the stack */
   p->if_depth_in_loop[p->loop_stack_depth]--;
   tmp = pop_if_stack(p);
   if (brw_inst_opcode(devinfo, tmp) == BRW_OPCODE_ELSE) {
      else_inst = tmp;
      tmp = pop_if_stack(p);
   }
   if_inst = tmp;

   if (!emit_endif) {
      /* ENDIF is useless; don't bother emitting it. */
      convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
      return;
   }

   if (devinfo->gen < 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (devinfo->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_w(0));
   } else {
      brw_set_src0(p, insn, brw_imm_d(0));
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_ENABLE);
   if (devinfo->gen < 6)
      brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);

   /* Also pop item off the stack in the endif instruction: */
   if (devinfo->gen < 6) {
      brw_inst_set_gen4_jump_count(devinfo, insn, 0);
      brw_inst_set_gen4_pop_count(devinfo, insn, 1);
   } else if (devinfo->gen == 6) {
      brw_inst_set_gen6_jump_count(devinfo, insn, 2);
   } else {
      brw_inst_set_jip(devinfo, insn, 2);
   }
   patch_IF_ELSE(p, if_inst, else_inst, insn);
}

brw_inst *
brw_BREAK(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_BREAK);
   if (devinfo->gen >= 8) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else if (devinfo->gen >= 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn,
                          brw_inst_exec_size(devinfo, p->current));

   return insn;
}

brw_inst *
brw_CONT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_CONTINUE);
   brw_set_dest(p, insn, brw_ip_reg());
   if (devinfo->gen >= 8) {
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   }

   if (devinfo->gen < 6) {
      brw_inst_set_gen4_pop_count(devinfo, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn,
                          brw_inst_exec_size(devinfo, p->current));
   return insn;
}

brw_inst *
gen6_HALT(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_HALT);
   brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   if (devinfo->gen >= 8) {
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
   }

   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(devinfo, insn,
                          brw_inst_exec_size(devinfo, p->current));
   return insn;
}

/* DO/WHILE loop:
 *
 * The DO/WHILE is just an unterminated loop -- BREAK or CONTINUE are
 * used for control within the loop. There are a few ways this can be
 * implemented.
 *
 * For uniform control flow, the WHILE is just a jump, so we emit an
 * ADD ip, ip, jip and no DO instruction.
 *
 * For non-uniform control flow pre-gen6, there's a DO instruction to
 * push the mask, a WHILE to jump back, and a BREAK to get out and
 * pop the mask.
 *
 * For gen6 and later, there's no more mask stack, so no need for DO.
 * WHILE just points back to the first instruction of the loop.
 */
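
/* Sketch of the gen6+ shape this produces for a simple non-uniform
 * loop (purely illustrative; instruction indices are arbitrary):
 *
 *    10: ...                <- loop start, remembered by brw_DO()
 *    14: BREAK              <- patched later by brw_set_uip_jip()
 *    20: WHILE  jip -> 10   <- set in brw_WHILE() from (do_insn - insn)
 */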
brw_inst *
brw_DO(struct brw_codegen *p, unsigned execute_size)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (devinfo->gen >= 6 || p->single_program_flow) {
      push_loop_stack(p, &p->store[p->nr_insn]);
      return &p->store[p->nr_insn];
   } else {
      brw_inst *insn = next_insn(p, BRW_OPCODE_DO);

      push_loop_stack(p, insn);

      /* Override the defaults for this instruction:
       */
      brw_set_dest(p, insn, brw_null_reg());
      brw_set_src0(p, insn, brw_null_reg());
      brw_set_src1(p, insn, brw_null_reg());

      brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);
      brw_inst_set_exec_size(devinfo, insn, execute_size);
      brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);

      return insn;
   }
}

/**
 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
 * instruction here.
 *
 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
 * nesting, since it can always just point to the end of the block/current loop.
 */
static void
brw_patch_break_cont(struct brw_codegen *p, brw_inst *while_inst)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *do_inst = get_inner_do_insn(p);
   brw_inst *inst;
   unsigned br = brw_jump_scale(devinfo);

   assert(devinfo->gen < 6);

   for (inst = while_inst - 1; inst != do_inst; inst--) {
      /* If the jump count is nonzero, this instruction has already been
       * patched as part of a loop nested inside the one we're patching.
       */
      if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_BREAK &&
          brw_inst_gen4_jump_count(devinfo, inst) == 0) {
         brw_inst_set_gen4_jump_count(devinfo, inst, br*((while_inst - inst) + 1));
      } else if (brw_inst_opcode(devinfo, inst) == BRW_OPCODE_CONTINUE &&
                 brw_inst_gen4_jump_count(devinfo, inst) == 0) {
         brw_inst_set_gen4_jump_count(devinfo, inst, br * (while_inst - inst));
      }
   }
}

brw_inst *
brw_WHILE(struct brw_codegen *p)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn, *do_insn;
   unsigned br = brw_jump_scale(devinfo);

   if (devinfo->gen >= 6) {
      insn = next_insn(p, BRW_OPCODE_WHILE);
      do_insn = get_inner_do_insn(p);

      if (devinfo->gen >= 8) {
         brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
         brw_set_src0(p, insn, brw_imm_d(0));
         brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
      } else if (devinfo->gen == 7) {
         brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
         brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
         brw_set_src1(p, insn, brw_imm_w(0));
         brw_inst_set_jip(devinfo, insn, br * (do_insn - insn));
      } else {
         brw_set_dest(p, insn, brw_imm_w(0));
         brw_inst_set_gen6_jump_count(devinfo, insn, br * (do_insn - insn));
         brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
         brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      }

      brw_inst_set_exec_size(devinfo, insn,
                             brw_inst_exec_size(devinfo, p->current));

   } else {
      if (p->single_program_flow) {
         insn = next_insn(p, BRW_OPCODE_ADD);
         do_insn = get_inner_do_insn(p);

         brw_set_dest(p, insn, brw_ip_reg());
         brw_set_src0(p, insn, brw_ip_reg());
         brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
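
         /* The immediate is a byte offset for the IP add; e.g. with the
          * loop start five instructions back this is -5 * 16 = -80.
          */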
         brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
      } else {
         insn = next_insn(p, BRW_OPCODE_WHILE);
         do_insn = get_inner_do_insn(p);

         assert(brw_inst_opcode(devinfo, do_insn) == BRW_OPCODE_DO);

         brw_set_dest(p, insn, brw_ip_reg());
         brw_set_src0(p, insn, brw_ip_reg());
         brw_set_src1(p, insn, brw_imm_d(0));

         brw_inst_set_exec_size(devinfo, insn, brw_inst_exec_size(devinfo, do_insn));
         brw_inst_set_gen4_jump_count(devinfo, insn, br * (do_insn - insn + 1));
         brw_inst_set_gen4_pop_count(devinfo, insn, 0);

         brw_patch_break_cont(p, insn);
      }
   }
   brw_inst_set_qtr_control(devinfo, insn, BRW_COMPRESSION_NONE);

   p->loop_stack_depth--;

   return insn;
}

/* FORWARD JUMPS:
 */
void brw_land_fwd_jump(struct brw_codegen *p, int jmp_insn_idx)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *jmp_insn = &p->store[jmp_insn_idx];
   unsigned jmpi = 1;

   if (devinfo->gen >= 5)
      jmpi = 2;

   assert(brw_inst_opcode(devinfo, jmp_insn) == BRW_OPCODE_JMPI);
   assert(brw_inst_src1_reg_file(devinfo, jmp_insn) == BRW_IMMEDIATE_VALUE);

   brw_inst_set_gen4_jump_count(devinfo, jmp_insn,
                                jmpi * (p->nr_insn - jmp_insn_idx - 1));
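
   /* Worked example: with p->nr_insn == 10 and the JMPI at index 7,
    * the Gen5+ count is 2 * (10 - 7 - 1) = 4 64-bit chunks, i.e. the
    * two whole instructions between the JMPI and the landing point.
    */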
}

/* To integrate with the above, it makes sense that the comparison
 * instruction should populate the flag register. It might be simpler
 * just to use the flag reg for most WM tasks?
 */
void brw_CMP(struct brw_codegen *p,
             struct brw_reg dest,
             unsigned conditional,
             struct brw_reg src0,
             struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);

   brw_inst_set_cond_modifier(devinfo, insn, conditional);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
    * page says:
    *    "Any CMP instruction with a null destination must use a {switch}."
    *
    * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
    * mentioned on their work-arounds pages.
    */
   if (devinfo->gen == 7) {
      if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
          dest.nr == BRW_ARF_NULL) {
         brw_inst_set_thread_control(devinfo, insn, BRW_THREAD_SWITCH);
      }
   }
}

/***********************************************************************
 * Helpers for the various SEND message types:
 */

/** Extended math function, float[8].
 */
void gen4_math(struct brw_codegen *p,
               struct brw_reg dest,
               unsigned function,
               unsigned msg_reg_nr,
               struct brw_reg src,
               unsigned precision)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
   unsigned data_type;
   if (has_scalar_region(src)) {
      data_type = BRW_MATH_DATA_SCALAR;
   } else {
      data_type = BRW_MATH_DATA_VECTOR;
   }

   assert(devinfo->gen < 6);

   /* Example code doesn't set predicate_control for send
    * instructions.
    */
   brw_inst_set_pred_control(devinfo, insn, 0);
   brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);

   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   brw_set_math_message(p,
                        insn,
                        function,
                        src.type == BRW_REGISTER_TYPE_D,
                        precision,
                        data_type);
}

void gen6_math(struct brw_codegen *p,
               struct brw_reg dest,
               unsigned function,
               struct brw_reg src0,
               struct brw_reg src1)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);

   assert(devinfo->gen >= 6);

   assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
          (devinfo->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));

   assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
   if (devinfo->gen == 6) {
      assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
      assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
   }

   if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
       function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
       function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
      assert(src0.type != BRW_REGISTER_TYPE_F);
      assert(src1.type != BRW_REGISTER_TYPE_F);
      assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
             (devinfo->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
   } else {
      assert(src0.type == BRW_REGISTER_TYPE_F);
      assert(src1.type == BRW_REGISTER_TYPE_F);
   }

   /* Source modifiers are ignored for extended math instructions on Gen6. */
   if (devinfo->gen == 6) {
      assert(!src0.negate);
      assert(!src0.abs);
      assert(!src1.negate);
      assert(!src1.abs);
   }

   brw_inst_set_math_function(devinfo, insn, function);

   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
}

/**
 * Return the right surface index to access the thread scratch space using
 * stateless dataport messages.
 */
unsigned
brw_scratch_surface_idx(const struct brw_codegen *p)
{
   /* The scratch space is thread-local so IA coherency is unnecessary. */
   if (p->devinfo->gen >= 8)
      return GEN8_BTI_STATELESS_NON_COHERENT;
   else
      return BRW_BTI_STATELESS;
}

/**
 * Write a block of OWORDs (half a GRF each) to the scratch buffer,
 * using a constant offset per channel.
 *
 * The offset must be aligned to oword size (16 bytes). Used for
 * register spilling.
 */
void brw_oword_block_write_scratch(struct brw_codegen *p,
                                   struct brw_reg mrf,
                                   int num_regs,
                                   unsigned offset)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned target_cache =
      (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
       devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
       BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
   uint32_t msg_type;

   if (devinfo->gen >= 6)
      offset /= 16;

   mrf = retype(mrf, BRW_REGISTER_TYPE_UD);

   const unsigned mlen = 1 + num_regs;

   /* Set up the message header. This is g0, with g0.2 filled with
    * the offset. We don't want to leave our offset around in g0 or
    * it'll screw up texture samples, so set it up inside the message
    * reg.
    */
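
   /* Resulting message layout, for reference (illustrative): m(mrf)
    * holds the header copied from g0 with g0.2 replaced by the scratch
    * offset, and the data is assumed to occupy the following num_regs
    * message registers, matching the mlen of 1 + num_regs above.
    */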
   {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);

      brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

      /* set message header global offset field (reg 0, element 2) */
      brw_MOV(p,
              retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
                                  mrf.nr,
                                  2), BRW_REGISTER_TYPE_UD),
              brw_imm_ud(offset));

      brw_pop_insn_state(p);
   }

   {
      struct brw_reg dest;
      brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
      int send_commit_msg;
      struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
                                         BRW_REGISTER_TYPE_UW);

      brw_inst_set_compression(devinfo, insn, false);

      if (brw_inst_exec_size(devinfo, insn) >= 16)
         src_header = vec16(src_header);

      assert(brw_inst_pred_control(devinfo, insn) == BRW_PREDICATE_NONE);
      if (devinfo->gen < 6)
         brw_inst_set_base_mrf(devinfo, insn, mrf.nr);

      /* Until gen6, writes followed by reads from the same location
       * are not guaranteed to be ordered unless write_commit is set.
       * If set, then a no-op write is issued to the destination
       * register to set a dependency, and a read from the destination
       * can be used to ensure the ordering.
       *
       * For gen6, only writes between different threads need ordering
       * protection. Our use of DP writes is all about register
       * spilling within a thread.
       */
      if (devinfo->gen >= 6) {
         dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
         send_commit_msg = 0;
      } else {
         dest = src_header;
         send_commit_msg = 1;
      }

      brw_set_dest(p, insn, dest);
      if (devinfo->gen >= 6) {
         brw_set_src0(p, insn, mrf);
      } else {
         brw_set_src0(p, insn, brw_null_reg());
      }

      if (devinfo->gen >= 6)
         msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
      else
         msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;

      brw_set_dp_write_message(p,
                               insn,
                               brw_scratch_surface_idx(p),
                               BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
                               msg_type,
                               target_cache,
                               mlen,
                               true, /* header_present */
                               0, /* not a render target */
                               send_commit_msg, /* response_length */
                               0, /* eot */
                               send_commit_msg);
   }
}


/**
 * Read a block of OWORDs (half a GRF each) from the scratch buffer,
 * using a constant offset per channel.
 *
 * The offset must be aligned to oword size (16 bytes). Used for
 * register spilling.
 */
void
brw_oword_block_read_scratch(struct brw_codegen *p,
                             struct brw_reg dest,
                             struct brw_reg mrf,
                             int num_regs,
                             unsigned offset)
{
   const struct gen_device_info *devinfo = p->devinfo;

   if (devinfo->gen >= 6)
      offset /= 16;

   if (p->devinfo->gen >= 7) {
      /* On gen 7 and above, we no longer have message registers and we can
       * send from any register we want. By using the destination register
       * for the message, we guarantee that the implied message write won't
       * accidentally overwrite anything. This has been a problem because
       * the MRF registers and source for the final FB write are both fixed
       * and may overlap.
       */
      mrf = retype(dest, BRW_REGISTER_TYPE_UD);
   } else {
      mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
   }
   dest = retype(dest, BRW_REGISTER_TYPE_UW);

   const unsigned rlen = num_regs;
   const unsigned target_cache =
      (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
       devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
       BRW_DATAPORT_READ_TARGET_RENDER_CACHE);

   {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);

      brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

      /* set message header global offset field (reg 0, element 2) */
      brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));

      brw_pop_insn_state(p);
   }

   {
      brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);

      assert(brw_inst_pred_control(devinfo, insn) == 0);
      brw_inst_set_compression(devinfo, insn, false);

      brw_set_dest(p, insn, dest); /* UW? */
      if (devinfo->gen >= 6) {
         brw_set_src0(p, insn, mrf);
      } else {
         brw_set_src0(p, insn, brw_null_reg());
         brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
      }

      brw_set_dp_read_message(p,
                              insn,
                              brw_scratch_surface_idx(p),
                              BRW_DATAPORT_OWORD_BLOCK_DWORDS(num_regs * 8),
                              BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
                              target_cache,
                              1, /* msg_length */
                              true, /* header_present */
                              rlen);
   }
}

void
gen7_block_read_scratch(struct brw_codegen *p,
                        struct brw_reg dest,
                        int num_regs,
                        unsigned offset)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
   assert(brw_inst_pred_control(p->devinfo, insn) == BRW_PREDICATE_NONE);

   brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));

   /* The HW requires that the header is present; this is to get the g0.5
    * scratch offset.
    */
   brw_set_src0(p, insn, brw_vec8_grf(0, 0));

   /* According to the docs, offset is "A 12-bit HWord offset into the memory
    * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
    * is 32 bytes, which happens to be the size of a register.
    */
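
   /* For example, a byte offset of 4096 becomes 4096 / REG_SIZE = 128
    * HWords, well within the 12-bit limit asserted below.
    */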
   offset /= REG_SIZE;
   assert(offset < (1 << 12));

   gen7_set_dp_scratch_message(p, insn,
                               false, /* scratch read */
                               false, /* OWords */
                               false, /* invalidate after read */
                               num_regs,
                               offset,
                               1,        /* mlen: just g0 */
                               num_regs, /* rlen */
                               true);    /* header present */
}

/**
 * Read float[4] vectors from the data port constant cache.
 * Location (in buffer) should be a multiple of 16.
 * Used for fetching shader constants.
 */
void brw_oword_block_read(struct brw_codegen *p,
                          struct brw_reg dest,
                          struct brw_reg mrf,
                          uint32_t offset,
                          uint32_t bind_table_index)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned target_cache =
      (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
       BRW_DATAPORT_READ_TARGET_DATA_CACHE);
   const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);

   /* On newer hardware, offset is in units of owords. */
   if (devinfo->gen >= 6)
      offset /= 16;

   mrf = retype(mrf, BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_push_insn_state(p);
   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   /* set message header global offset field (reg 0, element 2) */
   brw_MOV(p,
           retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
                               mrf.nr,
                               2), BRW_REGISTER_TYPE_UD),
           brw_imm_ud(offset));
   brw_pop_insn_state(p);

   brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);

   /* cast dest to a uword[8] vector */
   dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);

   brw_set_dest(p, insn, dest);
   if (devinfo->gen >= 6) {
      brw_set_src0(p, insn, mrf);
   } else {
      brw_set_src0(p, insn, brw_null_reg());
      brw_inst_set_base_mrf(devinfo, insn, mrf.nr);
   }

   brw_set_dp_read_message(p, insn, bind_table_index,
                           BRW_DATAPORT_OWORD_BLOCK_DWORDS(exec_size),
                           BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
                           target_cache,
                           1, /* msg_length */
                           true, /* header_present */
                           DIV_ROUND_UP(exec_size, 8)); /* response_length */

   brw_pop_insn_state(p);
}


void brw_fb_WRITE(struct brw_codegen *p,
                  struct brw_reg payload,
                  struct brw_reg implied_header,
                  unsigned msg_control,
                  unsigned binding_table_index,
                  unsigned msg_length,
                  unsigned response_length,
                  bool eot,
                  bool last_render_target,
                  bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned target_cache =
      (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
       BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
   brw_inst *insn;
   unsigned msg_type;
   struct brw_reg dest, src0;

   if (brw_inst_exec_size(devinfo, p->current) >= BRW_EXECUTE_16)
      dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
   else
      dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);

   if (devinfo->gen >= 6) {
      insn = next_insn(p, BRW_OPCODE_SENDC);
   } else {
      insn = next_insn(p, BRW_OPCODE_SEND);
   }
   brw_inst_set_compression(devinfo, insn, false);

   if (devinfo->gen >= 6) {
      /* headerless version, just submit color payload */
      src0 = payload;

      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
   } else {
      assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
      brw_inst_set_base_mrf(devinfo, insn, payload.nr);
      src0 = implied_header;

      msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
   }

   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_dp_write_message(p,
                            insn,
                            binding_table_index,
                            msg_control,
                            msg_type,
                            target_cache,
                            msg_length,
                            header_present,
                            last_render_target,
                            response_length,
                            eot,
                            0 /* send_commit_msg */);
}

brw_inst *
gen9_fb_READ(struct brw_codegen *p,
             struct brw_reg dst,
             struct brw_reg payload,
             unsigned binding_table_index,
             unsigned msg_length,
             unsigned response_length,
             bool per_sample)
{
   const struct gen_device_info *devinfo = p->devinfo;
   assert(devinfo->gen >= 9);
   const unsigned msg_subtype =
      brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16 ? 0 : 1;
   brw_inst *insn = next_insn(p, BRW_OPCODE_SENDC);

   brw_set_dest(p, insn, dst);
   brw_set_src0(p, insn, payload);
   brw_set_dp_read_message(p, insn, binding_table_index,
                           per_sample << 5 | msg_subtype,
                           GEN9_DATAPORT_RC_RENDER_TARGET_READ,
                           GEN6_SFID_DATAPORT_RENDER_CACHE,
                           msg_length, true /* header_present */,
                           response_length);
   brw_inst_set_rt_slot_group(devinfo, insn,
                              brw_inst_qtr_control(devinfo, p->current) / 2);

   return insn;
}

/**
 * Texture sample instruction.
 * Note: the msg_type plus msg_length values determine exactly what kind
 * of sampling operation is performed. See volume 4, page 161 of docs.
 */
void brw_SAMPLE(struct brw_codegen *p,
                struct brw_reg dest,
                unsigned msg_reg_nr,
                struct brw_reg src0,
                unsigned binding_table_index,
                unsigned sampler,
                unsigned msg_type,
                unsigned response_length,
                unsigned msg_length,
                unsigned header_present,
                unsigned simd_mode,
                unsigned return_format)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   if (msg_reg_nr != -1)
      gen6_resolve_implied_move(p, &src0, msg_reg_nr);

   insn = next_insn(p, BRW_OPCODE_SEND);
   brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE); /* XXX */

   /* From the 965 PRM (volume 4, part 1, section 14.2.41):
    *
    *    "Instruction compression is not allowed for this instruction (that
    *     is, send). The hardware behavior is undefined if this instruction is
    *     set as compressed. However, compress control can be set to "SecHalf"
    *     to affect the EMask generation."
    *
    * No similar wording is found in later PRMs, but there are examples
    * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
    * are allowed in SIMD16 mode and they could not work without SecHalf. For
    * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
    */
   brw_inst_set_compression(devinfo, insn, false);

   if (devinfo->gen < 6)
      brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);

   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_sampler_message(p, insn,
                           binding_table_index,
                           sampler,
                           msg_type,
                           response_length,
                           msg_length,
                           header_present,
                           simd_mode,
                           return_format);
}

/* Adjust the message header's sampler state pointer to
 * select the correct group of 16 samplers.
 */
void brw_adjust_sampler_state_pointer(struct brw_codegen *p,
                                      struct brw_reg header,
                                      struct brw_reg sampler_index)
{
   /* The "Sampler Index" field can only store values between 0 and 15.
    * However, we can add an offset to the "Sampler State Pointer"
    * field, effectively selecting a different set of 16 samplers.
    *
    * The "Sampler State Pointer" needs to be aligned to a 32-byte
    * offset, and each sampler state is only 16 bytes, so we can't
    * exclusively use the offset - we have to use both.
    */
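
   /* Worked example (illustrative): sampler_index 20 adds
    * 16 * (20 / 16) * 16 = 256 bytes to the sampler state pointer,
    * i.e. one group of 16 sampler states at 16 bytes each, leaving
    * 20 % 16 = 4 for the "Sampler Index" field, which is assumed to be
    * set elsewhere.
    */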

   const struct gen_device_info *devinfo = p->devinfo;

   if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
      const int sampler_state_size = 16; /* 16 bytes */
      uint32_t sampler = sampler_index.ud;

      if (sampler >= 16) {
         assert(devinfo->is_haswell || devinfo->gen >= 8);
         brw_ADD(p,
                 get_element_ud(header, 3),
                 get_element_ud(brw_vec8_grf(0, 0), 3),
                 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
      }
   } else {
      /* Non-const sampler array indexing case */
      if (devinfo->gen < 8 && !devinfo->is_haswell) {
         return;
      }

      struct brw_reg temp = get_element_ud(header, 3);

      brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
      brw_SHL(p, temp, temp, brw_imm_ud(4));
      brw_ADD(p,
              get_element_ud(header, 3),
              get_element_ud(brw_vec8_grf(0, 0), 3),
              temp);
   }
}

/* All these variables are pretty confusing - we might be better off
 * using bitmasks and macros for this, in the old style. Or perhaps
 * just having the caller instantiate the fields in dword3 itself.
 */
void brw_urb_WRITE(struct brw_codegen *p,
                   struct brw_reg dest,
                   unsigned msg_reg_nr,
                   struct brw_reg src0,
                   enum brw_urb_write_flags flags,
                   unsigned msg_length,
                   unsigned response_length,
                   unsigned offset,
                   unsigned swizzle)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   gen6_resolve_implied_move(p, &src0, msg_reg_nr);

   if (devinfo->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
      /* Enable Channel Masks in the URB_WRITE_HWORD message header */
      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
                       BRW_REGISTER_TYPE_UD),
             retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
             brw_imm_ud(0xff00));
      brw_pop_insn_state(p);
   }

   insn = next_insn(p, BRW_OPCODE_SEND);

   assert(msg_length < BRW_MAX_MRF(devinfo->gen));

   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, brw_imm_d(0));

   if (devinfo->gen < 6)
      brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);

   brw_set_urb_message(p,
                       insn,
                       flags,
                       msg_length,
                       response_length,
                       offset,
                       swizzle);
}

struct brw_inst *
brw_send_indirect_message(struct brw_codegen *p,
                          unsigned sfid,
                          struct brw_reg dst,
                          struct brw_reg payload,
                          struct brw_reg desc)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *send;
   int setup;

   dst = retype(dst, BRW_REGISTER_TYPE_UW);

   assert(desc.type == BRW_REGISTER_TYPE_UD);

   /* We hold on to the setup instruction (the SEND in the direct case, the OR
    * in the indirect case) by its index in the instruction store. The
    * pointer returned by next_insn() may become invalid if emitting the SEND
    * in the indirect case reallocs the store.
    */

   if (desc.file == BRW_IMMEDIATE_VALUE) {
      setup = p->nr_insn;
      send = next_insn(p, BRW_OPCODE_SEND);
      brw_set_src1(p, send, desc);

   } else {
      struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);

      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

      /* Load the indirect descriptor to an address register using OR so the
       * caller can specify additional descriptor bits with the usual
       * brw_set_*_message() helper functions.
       */
      setup = p->nr_insn;
      brw_OR(p, addr, desc, brw_imm_ud(0));

      brw_pop_insn_state(p);

      send = next_insn(p, BRW_OPCODE_SEND);
      brw_set_src1(p, send, addr);
   }

   if (dst.width < BRW_EXECUTE_8)
      brw_inst_set_exec_size(devinfo, send, dst.width);

   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
   brw_inst_set_sfid(devinfo, send, sfid);

   return &p->store[setup];
}

static struct brw_inst *
brw_send_indirect_surface_message(struct brw_codegen *p,
                                  unsigned sfid,
                                  struct brw_reg dst,
                                  struct brw_reg payload,
                                  struct brw_reg surface,
                                  unsigned message_len,
                                  unsigned response_len,
                                  bool header_present)
{
   const struct gen_device_info *devinfo = p->devinfo;
   struct brw_inst *insn;

   if (surface.file != BRW_IMMEDIATE_VALUE) {
      struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);

      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);

      /* Mask out invalid bits from the surface index to avoid hangs e.g. when
       * some surface array is accessed out of bounds.
       */
      insn = brw_AND(p, addr,
                     suboffset(vec1(retype(surface, BRW_REGISTER_TYPE_UD)),
                               BRW_GET_SWZ(surface.swizzle, 0)),
                     brw_imm_ud(0xff));

      brw_pop_insn_state(p);

      surface = addr;
   }

   insn = brw_send_indirect_message(p, sfid, dst, payload, surface);
   brw_inst_set_mlen(devinfo, insn, message_len);
   brw_inst_set_rlen(devinfo, insn, response_len);
   brw_inst_set_header_present(devinfo, insn, header_present);

   return insn;
}

static bool
while_jumps_before_offset(const struct gen_device_info *devinfo,
                          brw_inst *insn, int while_offset, int start_offset)
{
   int scale = 16 / brw_jump_scale(devinfo);
   int jip = devinfo->gen == 6 ? brw_inst_gen6_jump_count(devinfo, insn)
                               : brw_inst_jip(devinfo, insn);
   assert(jip < 0);
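
   /* jip is in jump-scale units; e.g. on Gen7 (brw_jump_scale == 2)
    * scale is 8 bytes per unit, so a WHILE at byte offset 128 with
    * jip == -4 targets offset 128 - 32 = 96.
    */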
   return while_offset + jip * scale <= start_offset;
}


static int
brw_find_next_block_end(struct brw_codegen *p, int start_offset)
{
   int offset;
   void *store = p->store;
   const struct gen_device_info *devinfo = p->devinfo;

   int depth = 0;

   for (offset = next_offset(devinfo, store, start_offset);
        offset < p->next_insn_offset;
        offset = next_offset(devinfo, store, offset)) {
      brw_inst *insn = store + offset;

      switch (brw_inst_opcode(devinfo, insn)) {
      case BRW_OPCODE_IF:
         depth++;
         break;
      case BRW_OPCODE_ENDIF:
         if (depth == 0)
            return offset;
         depth--;
         break;
      case BRW_OPCODE_WHILE:
         /* If the while doesn't jump before our instruction, it's the end
          * of a sibling do...while loop. Ignore it.
          */
         if (!while_jumps_before_offset(devinfo, insn, offset, start_offset))
            continue;
         /* fallthrough */
      case BRW_OPCODE_ELSE:
      case BRW_OPCODE_HALT:
         if (depth == 0)
            return offset;
      }
   }

   return 0;
}

/* There is no DO instruction on gen6 and later, so to find the end of the
 * loop we have to see if the loop is jumping back before our start
 * instruction.
 */
static int
brw_find_loop_end(struct brw_codegen *p, int start_offset)
{
   const struct gen_device_info *devinfo = p->devinfo;
   int offset;
   void *store = p->store;

   assert(devinfo->gen >= 6);

   /* Always start after the instruction (such as a WHILE) we're trying to fix
    * up.
    */
   for (offset = next_offset(devinfo, store, start_offset);
        offset < p->next_insn_offset;
        offset = next_offset(devinfo, store, offset)) {
      brw_inst *insn = store + offset;

      if (brw_inst_opcode(devinfo, insn) == BRW_OPCODE_WHILE) {
         if (while_jumps_before_offset(devinfo, insn, offset, start_offset))
            return offset;
      }
   }
   assert(!"not reached");
   return start_offset;
}

/* After program generation, go back and update the UIP and JIP of
 * BREAK, CONT, and HALT instructions to their correct locations.
 */
void
brw_set_uip_jip(struct brw_codegen *p, int start_offset)
{
   const struct gen_device_info *devinfo = p->devinfo;
   int offset;
   int br = brw_jump_scale(devinfo);
   int scale = 16 / br;
   void *store = p->store;

   if (devinfo->gen < 6)
      return;

   for (offset = start_offset; offset < p->next_insn_offset; offset += 16) {
      brw_inst *insn = store + offset;
      assert(brw_inst_cmpt_control(devinfo, insn) == 0);

      int block_end_offset = brw_find_next_block_end(p, offset);
      switch (brw_inst_opcode(devinfo, insn)) {
      case BRW_OPCODE_BREAK:
         assert(block_end_offset != 0);
         brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
         /* Gen7 UIP points to WHILE; Gen6 points just after it */
         brw_inst_set_uip(devinfo, insn,
                          (brw_find_loop_end(p, offset) - offset +
                           (devinfo->gen == 6 ? 16 : 0)) / scale);
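
         /* E.g. on Gen6 (scale == 8) a BREAK whose WHILE sits 48 bytes
          * ahead gets UIP = (48 + 16) / 8 = 8 units, one full
          * instruction past the WHILE.
          */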
         break;
      case BRW_OPCODE_CONTINUE:
         assert(block_end_offset != 0);
         brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
         brw_inst_set_uip(devinfo, insn,
                          (brw_find_loop_end(p, offset) - offset) / scale);

         assert(brw_inst_uip(devinfo, insn) != 0);
         assert(brw_inst_jip(devinfo, insn) != 0);
         break;

      case BRW_OPCODE_ENDIF: {
         int32_t jump = (block_end_offset == 0) ?
                        1 * br : (block_end_offset - offset) / scale;
         if (devinfo->gen >= 7)
            brw_inst_set_jip(devinfo, insn, jump);
         else
            brw_inst_set_gen6_jump_count(devinfo, insn, jump);
         break;
      }

      case BRW_OPCODE_HALT:
         /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
          *
          *    "In case of the halt instruction not inside any conditional
          *     code block, the value of <JIP> and <UIP> should be the
          *     same. In case of the halt instruction inside conditional code
          *     block, the <UIP> should be the end of the program, and the
          *     <JIP> should be end of the most inner conditional code block."
          *
          * The uip will have already been set by whoever set up the
          * instruction.
          */
         if (block_end_offset == 0) {
            brw_inst_set_jip(devinfo, insn, brw_inst_uip(devinfo, insn));
         } else {
            brw_inst_set_jip(devinfo, insn, (block_end_offset - offset) / scale);
         }
         assert(brw_inst_uip(devinfo, insn) != 0);
         assert(brw_inst_jip(devinfo, insn) != 0);
         break;
      }
   }
}

void brw_ff_sync(struct brw_codegen *p,
                 struct brw_reg dest,
                 unsigned msg_reg_nr,
                 struct brw_reg src0,
                 bool allocate,
                 unsigned response_length,
                 bool eot)
{
   const struct gen_device_info *devinfo = p->devinfo;
   brw_inst *insn;

   gen6_resolve_implied_move(p, &src0, msg_reg_nr);

   insn = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, brw_imm_d(0));

   if (devinfo->gen < 6)
      brw_inst_set_base_mrf(devinfo, insn, msg_reg_nr);

   brw_set_ff_sync_message(p,
                           insn,
                           allocate,
                           response_length,
                           eot);
}

/**
 * Emit the SEND instruction necessary to generate stream output data on Gen6
 * (for transform feedback).
 *
 * If send_commit_msg is true, this is the last piece of stream output data
 * from this thread, so send the data as a committed write. According to the
 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
 *
 *    "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
 *     writes are complete by sending the final write as a committed write."
 */
void
brw_svb_write(struct brw_codegen *p,
              struct brw_reg dest,
              unsigned msg_reg_nr,
              struct brw_reg src0,
              unsigned binding_table_index,
              bool send_commit_msg)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned target_cache =
      (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
       devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
       BRW_DATAPORT_READ_TARGET_RENDER_CACHE);
   brw_inst *insn;

   gen6_resolve_implied_move(p, &src0, msg_reg_nr);

   insn = next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, brw_imm_d(0));
   brw_set_dp_write_message(p, insn,
                            binding_table_index,
                            0, /* msg_control: ignored */
                            GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
                            target_cache,
                            1, /* msg_length */
                            true, /* header_present */
                            0, /* last_render_target: ignored */
                            send_commit_msg, /* response_length */
                            0, /* end_of_thread */
                            send_commit_msg); /* send_commit_msg */
}

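/* Size in registers of a surface message payload or response for the
 * current execution state. As a worked example, a SIMD16 Align1
 * message with num_channels == 4 occupies 2 * 4 = 8 registers, while
 * the SIMD4x2 Align16 case packs the whole payload into one register.
 */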
static unsigned
brw_surface_payload_size(struct brw_codegen *p,
                         unsigned num_channels,
                         bool has_simd4x2,
                         bool has_simd16)
{
   if (has_simd4x2 &&
       brw_inst_access_mode(p->devinfo, p->current) == BRW_ALIGN_16)
      return 1;
   else if (has_simd16 &&
            brw_inst_exec_size(p->devinfo, p->current) == BRW_EXECUTE_16)
      return 2 * num_channels;
   else
      return num_channels;
}

static void
brw_set_dp_untyped_atomic_message(struct brw_codegen *p,
                                  brw_inst *insn,
                                  unsigned atomic_op,
                                  bool response_expected)
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_control =
      atomic_op |   /* Atomic Operation Type: BRW_AOP_* */
      (response_expected ? 1 << 5 : 0); /* Return data expected */
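
   /* For example (illustrative), BRW_AOP_ADD with a return value
    * expected yields atomic_op | 1 << 5 here, and SIMD8 execution adds
    * the 1 << 4 mode bit below.
    */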

   if (devinfo->gen >= 8 || devinfo->is_haswell) {
      if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
         if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
            msg_control |= 1 << 4; /* SIMD8 mode */

         brw_inst_set_dp_msg_type(devinfo, insn,
                                  HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
      } else {
         brw_inst_set_dp_msg_type(devinfo, insn,
                                  HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
      }
   } else {
      brw_inst_set_dp_msg_type(devinfo, insn,
                               GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);

      if (brw_inst_exec_size(devinfo, p->current) != BRW_EXECUTE_16)
         msg_control |= 1 << 4; /* SIMD8 mode */
   }

   brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
}

void
brw_untyped_atomic(struct brw_codegen *p,
                   struct brw_reg dst,
                   struct brw_reg payload,
                   struct brw_reg surface,
                   unsigned atomic_op,
                   unsigned msg_length,
                   bool response_expected)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
   /* Mask out unused components -- This is especially important in Align16
    * mode on generations that don't have native support for SIMD4x2 atomics,
    * because unused but enabled components will cause the dataport to perform
    * additional atomic operations on the addresses that happen to be in the
    * uninitialized Y, Z and W coordinates of the payload.
    */
   const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
   struct brw_inst *insn = brw_send_indirect_surface_message(
      p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
      brw_surface_payload_size(p, response_expected,
                               devinfo->gen >= 8 || devinfo->is_haswell, true),
      align1);

   brw_set_dp_untyped_atomic_message(
      p, insn, atomic_op, response_expected);
}

static void
brw_set_dp_untyped_surface_read_message(struct brw_codegen *p,
                                        struct brw_inst *insn,
                                        unsigned num_channels)
{
   const struct gen_device_info *devinfo = p->devinfo;
   /* Set mask of 32-bit channels to drop. */
   unsigned msg_control = 0xf & (0xf << num_channels);
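
   /* E.g. num_channels == 2 gives msg_control = 0xf & (0xf << 2) = 0xc:
    * the Z and W channels are dropped and only X and Y are returned.
    */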

   if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
      if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
         msg_control |= 1 << 4; /* SIMD16 mode */
      else
         msg_control |= 2 << 4; /* SIMD8 mode */
   }

   brw_inst_set_dp_msg_type(devinfo, insn,
                            (devinfo->gen >= 8 || devinfo->is_haswell ?
                             HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ :
                             GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ));
   brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
}

void
brw_untyped_surface_read(struct brw_codegen *p,
                         struct brw_reg dst,
                         struct brw_reg payload,
                         struct brw_reg surface,
                         unsigned msg_length,
                         unsigned num_channels)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   struct brw_inst *insn = brw_send_indirect_surface_message(
      p, sfid, dst, payload, surface, msg_length,
      brw_surface_payload_size(p, num_channels, true, true),
      false);

   brw_set_dp_untyped_surface_read_message(
      p, insn, num_channels);
}

static void
brw_set_dp_untyped_surface_write_message(struct brw_codegen *p,
                                         struct brw_inst *insn,
                                         unsigned num_channels)
{
   const struct gen_device_info *devinfo = p->devinfo;
   /* Set mask of 32-bit channels to drop. */
   unsigned msg_control = 0xf & (0xf << num_channels);

   if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
      if (brw_inst_exec_size(devinfo, p->current) == BRW_EXECUTE_16)
         msg_control |= 1 << 4; /* SIMD16 mode */
      else
         msg_control |= 2 << 4; /* SIMD8 mode */
   } else {
      if (devinfo->gen >= 8 || devinfo->is_haswell)
         msg_control |= 0 << 4; /* SIMD4x2 mode */
      else
         msg_control |= 2 << 4; /* SIMD8 mode */
   }

   brw_inst_set_dp_msg_type(devinfo, insn,
                            devinfo->gen >= 8 || devinfo->is_haswell ?
                            HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE :
                            GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE);
   brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
}

void
brw_untyped_surface_write(struct brw_codegen *p,
                          struct brw_reg payload,
                          struct brw_reg surface,
                          unsigned msg_length,
                          unsigned num_channels)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN7_SFID_DATAPORT_DATA_CACHE);
   const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
   /* Mask out unused components -- See comment in brw_untyped_atomic(). */
   const unsigned mask = devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
                         WRITEMASK_X : WRITEMASK_XYZW;
   struct brw_inst *insn = brw_send_indirect_surface_message(
      p, sfid, brw_writemask(brw_null_reg(), mask),
      payload, surface, msg_length, 0, align1);

   brw_set_dp_untyped_surface_write_message(
      p, insn, num_channels);
}

static void
brw_set_dp_typed_atomic_message(struct brw_codegen *p,
                                struct brw_inst *insn,
                                unsigned atomic_op,
                                bool response_expected)
{
   const struct gen_device_info *devinfo = p->devinfo;
   unsigned msg_control =
      atomic_op |   /* Atomic Operation Type: BRW_AOP_* */
      (response_expected ? 1 << 5 : 0); /* Return data expected */

   if (devinfo->gen >= 8 || devinfo->is_haswell) {
      if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
         if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
            msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */

         brw_inst_set_dp_msg_type(devinfo, insn,
                                  HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP);
      } else {
         brw_inst_set_dp_msg_type(devinfo, insn,
                                  HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2);
      }

   } else {
      brw_inst_set_dp_msg_type(devinfo, insn,
                               GEN7_DATAPORT_RC_TYPED_ATOMIC_OP);

      if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
         msg_control |= 1 << 4; /* Use high 8 slots of the sample mask */
   }

   brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
}

void
brw_typed_atomic(struct brw_codegen *p,
                 struct brw_reg dst,
                 struct brw_reg payload,
                 struct brw_reg surface,
                 unsigned atomic_op,
                 unsigned msg_length,
                 bool response_expected) {
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN6_SFID_DATAPORT_RENDER_CACHE);
   const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
   /* Mask out unused components -- See comment in brw_untyped_atomic(). */
   const unsigned mask = align1 ? WRITEMASK_XYZW : WRITEMASK_X;
   struct brw_inst *insn = brw_send_indirect_surface_message(
      p, sfid, brw_writemask(dst, mask), payload, surface, msg_length,
      brw_surface_payload_size(p, response_expected,
                               devinfo->gen >= 8 || devinfo->is_haswell, false),
      true);

   brw_set_dp_typed_atomic_message(
      p, insn, atomic_op, response_expected);
}

static void
brw_set_dp_typed_surface_read_message(struct brw_codegen *p,
                                      struct brw_inst *insn,
                                      unsigned num_channels)
{
   const struct gen_device_info *devinfo = p->devinfo;
   /* Set mask of unused channels. */
   unsigned msg_control = 0xf & (0xf << num_channels);

   if (devinfo->gen >= 8 || devinfo->is_haswell) {
      if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
         if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
            msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
         else
            msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
      }

      brw_inst_set_dp_msg_type(devinfo, insn,
                               HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ);
   } else {
      if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
         if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
            msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
      }

      brw_inst_set_dp_msg_type(devinfo, insn,
                               GEN7_DATAPORT_RC_TYPED_SURFACE_READ);
   }

   brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
}

void
brw_typed_surface_read(struct brw_codegen *p,
                       struct brw_reg dst,
                       struct brw_reg payload,
                       struct brw_reg surface,
                       unsigned msg_length,
                       unsigned num_channels)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN6_SFID_DATAPORT_RENDER_CACHE);
   struct brw_inst *insn = brw_send_indirect_surface_message(
      p, sfid, dst, payload, surface, msg_length,
      brw_surface_payload_size(p, num_channels,
                               devinfo->gen >= 8 || devinfo->is_haswell, false),
      true);

   brw_set_dp_typed_surface_read_message(
      p, insn, num_channels);
}

static void
brw_set_dp_typed_surface_write_message(struct brw_codegen *p,
                                       struct brw_inst *insn,
                                       unsigned num_channels)
{
   const struct gen_device_info *devinfo = p->devinfo;
   /* Set mask of unused channels. */
   unsigned msg_control = 0xf & (0xf << num_channels);

   if (devinfo->gen >= 8 || devinfo->is_haswell) {
      if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
         if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
            msg_control |= 2 << 4; /* Use high 8 slots of the sample mask */
         else
            msg_control |= 1 << 4; /* Use low 8 slots of the sample mask */
      }

      brw_inst_set_dp_msg_type(devinfo, insn,
                               HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE);

   } else {
      if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
         if (brw_inst_qtr_control(devinfo, p->current) % 2 == 1)
            msg_control |= 1 << 5; /* Use high 8 slots of the sample mask */
      }

      brw_inst_set_dp_msg_type(devinfo, insn,
                               GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE);
   }

   brw_inst_set_dp_msg_control(devinfo, insn, msg_control);
}

void
brw_typed_surface_write(struct brw_codegen *p,
                        struct brw_reg payload,
                        struct brw_reg surface,
                        unsigned msg_length,
                        unsigned num_channels)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const unsigned sfid = (devinfo->gen >= 8 || devinfo->is_haswell ?
                          HSW_SFID_DATAPORT_DATA_CACHE_1 :
                          GEN6_SFID_DATAPORT_RENDER_CACHE);
   const bool align1 = (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1);
   /* Mask out unused components -- See comment in brw_untyped_atomic(). */
   const unsigned mask = (devinfo->gen == 7 && !devinfo->is_haswell && !align1 ?
                          WRITEMASK_X : WRITEMASK_XYZW);
   struct brw_inst *insn = brw_send_indirect_surface_message(
      p, sfid, brw_writemask(brw_null_reg(), mask),
      payload, surface, msg_length, 0, true);

   brw_set_dp_typed_surface_write_message(
      p, insn, num_channels);
}

static void
brw_set_memory_fence_message(struct brw_codegen *p,
                             struct brw_inst *insn,
                             enum brw_message_target sfid,
                             bool commit_enable)
{
   const struct gen_device_info *devinfo = p->devinfo;

   brw_set_message_descriptor(p, insn, sfid,
                              1 /* message length */,
                              (commit_enable ? 1 : 0) /* response length */,
                              true /* header present */,
                              false);

   switch (sfid) {
   case GEN6_SFID_DATAPORT_RENDER_CACHE:
      brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_RC_MEMORY_FENCE);
      break;
   case GEN7_SFID_DATAPORT_DATA_CACHE:
      brw_inst_set_dp_msg_type(devinfo, insn, GEN7_DATAPORT_DC_MEMORY_FENCE);
      break;
   default:
      unreachable("Not reached");
   }

   if (commit_enable)
      brw_inst_set_dp_msg_control(devinfo, insn, 1 << 5);
}

void
brw_memory_fence(struct brw_codegen *p,
                 struct brw_reg dst)
{
   const struct gen_device_info *devinfo = p->devinfo;
   const bool commit_enable = devinfo->gen == 7 && !devinfo->is_haswell;
   struct brw_inst *insn;

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_exec_size(p, BRW_EXECUTE_1);
   dst = vec1(dst);

   /* Set dst as the destination for dependency tracking; the MEMORY_FENCE
    * message doesn't write anything back.
    */
3291 insn = next_insn(p, BRW_OPCODE_SEND);
3292 dst = retype(dst, BRW_REGISTER_TYPE_UW);
3293 brw_set_dest(p, insn, dst);
3294 brw_set_src0(p, insn, dst);
3295 brw_set_memory_fence_message(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
3296 commit_enable);
3297
3298 if (devinfo->gen == 7 && !devinfo->is_haswell) {
3299 /* IVB does typed surface access through the render cache, so we need to
3300 * flush it too. Use a different register so both flushes can be
3301 * pipelined by the hardware.
3302 */
3303 insn = next_insn(p, BRW_OPCODE_SEND);
3304 brw_set_dest(p, insn, offset(dst, 1));
3305 brw_set_src0(p, insn, offset(dst, 1));
3306 brw_set_memory_fence_message(p, insn, GEN6_SFID_DATAPORT_RENDER_CACHE,
3307 commit_enable);
3308
3309 /* Now write the response of the second message into the response of the
3310 * first to trigger a pipeline stall -- This way future render and data
3311 * cache messages will be properly ordered with respect to past data and
3312 * render cache messages.
3313 */
3314 brw_MOV(p, dst, offset(dst, 1));
3315 }
3316
3317 brw_pop_insn_state(p);
3318 }
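/* Illustrative sketch (not part of the original file): order all previous
 * data-cache messages before any later ones.  Scratch register g120 as the
 * commit destination is an arbitrary, hypothetical choice.
 */
static void
example_memory_fence(struct brw_codegen *p)
{
   brw_memory_fence(p, brw_vec8_grf(120, 0));
}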
3319
3320 void
3321 brw_pixel_interpolator_query(struct brw_codegen *p,
3322 struct brw_reg dest,
3323 struct brw_reg mrf,
3324 bool noperspective,
3325 unsigned mode,
3326 struct brw_reg data,
3327 unsigned msg_length,
3328 unsigned response_length)
3329 {
3330 const struct gen_device_info *devinfo = p->devinfo;
3331 struct brw_inst *insn;
3332 const uint16_t exec_size = brw_inst_exec_size(devinfo, p->current);
3333
3334 /* brw_send_indirect_message will automatically use a direct send message
3335 * if data is actually an immediate.
3336 */
3337 insn = brw_send_indirect_message(p,
3338 GEN7_SFID_PIXEL_INTERPOLATOR,
3339 dest,
3340 mrf,
3341 vec1(data));
3342 brw_inst_set_mlen(devinfo, insn, msg_length);
3343 brw_inst_set_rlen(devinfo, insn, response_length);
3344
3345 brw_inst_set_pi_simd_mode(devinfo, insn, exec_size == BRW_EXECUTE_16);
3346 brw_inst_set_pi_slot_group(devinfo, insn, 0); /* zero unless 32/64px dispatch */
3347 brw_inst_set_pi_nopersp(devinfo, insn, noperspective);
3348 brw_inst_set_pi_message_type(devinfo, insn, mode);
3349 }
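/* Illustrative sketch (not part of the original file): a pixel interpolator
 * query for a SIMD8 dispatch.  The message register, mode value and lengths
 * are hypothetical placeholders; real callers pick them from the
 * interpolation mode and dispatch width.
 */
static void
example_pi_query(struct brw_codegen *p, struct brw_reg dest)
{
   brw_pixel_interpolator_query(p, dest,
                                brw_message_reg(2) /* mrf */,
                                false /* noperspective */,
                                0 /* mode, placeholder */,
                                brw_imm_ud(0) /* data */,
                                1 /* msg_length */,
                                2 /* response_length */);
}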
3350
3351 void
3352 brw_find_live_channel(struct brw_codegen *p, struct brw_reg dst,
3353 struct brw_reg mask)
3354 {
3355 const struct gen_device_info *devinfo = p->devinfo;
3356 const unsigned exec_size = 1 << brw_inst_exec_size(devinfo, p->current);
3357 const unsigned qtr_control = brw_inst_qtr_control(devinfo, p->current);
3358 brw_inst *inst;
3359
3360 assert(devinfo->gen >= 7);
3361 assert(mask.type == BRW_REGISTER_TYPE_UD);
3362
3363 brw_push_insn_state(p);
3364
3365 if (brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1) {
3366 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3367
3368 if (devinfo->gen >= 8) {
3369 /* Getting the first active channel index is easy on Gen8: Just find
3370 * the first bit set in the execution mask. The register exists on
3371 * HSW already but it reads back as all ones when the current
3372 * instruction has execution masking disabled, so it's kind of
3373 * useless.
3374 */
3375 struct brw_reg exec_mask =
3376 retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD);
3377
3378 if (mask.file != BRW_IMMEDIATE_VALUE || mask.ud != 0xffffffff) {
3379 /* Unfortunately, ce0 does not take into account the thread
3380 * dispatch mask, which may be a problem in cases where it's not
3381 * tightly packed (i.e. it doesn't have the form '2^n - 1' for
3382 * some n). Combine ce0 with the given dispatch (or vector) mask
3383 * to mask off those channels which were never dispatched by the
3384 * hardware.
3385 */
3386 brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
3387 brw_AND(p, vec1(dst), exec_mask, vec1(dst));
3388 exec_mask = vec1(dst);
3389 }
3390
3391 /* Quarter control has the effect of magically shifting the value of
3392 * ce0 so you'll get the first active channel relative to the
3393 * specified quarter control as the result.
3394 */
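/* Worked example (illustration only): if exec_mask reads back as
 * 0b...01100, channels 0 and 1 are inactive and the FBL below returns 2,
 * the index of the first live channel.
 */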
3395 inst = brw_FBL(p, vec1(dst), exec_mask);
3396 } else {
3397 const struct brw_reg flag = brw_flag_reg(1, 0);
3398
3399 brw_MOV(p, retype(flag, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
3400
3401 /* Run enough instructions returning zero with execution masking and
3402 * a conditional modifier enabled in order to get the full execution
3403 * mask in f1.0. We could use a single 32-wide move here if it
3404 * weren't for the hardware bug that causes channel enables to
3405 * be applied incorrectly to the second half of 32-wide instructions
3406 * on Gen7.
3407 */
3408 const unsigned lower_size = MIN2(16, exec_size);
3409 for (unsigned i = 0; i < exec_size / lower_size; i++) {
3410 inst = brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
3411 brw_imm_uw(0));
3412 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
3413 brw_inst_set_group(devinfo, inst, lower_size * i + 8 * qtr_control);
3414 brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_Z);
3415 brw_inst_set_flag_reg_nr(devinfo, inst, 1);
3416 brw_inst_set_exec_size(devinfo, inst, cvt(lower_size) - 1);
3417 }
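/* Illustration (not in the original): with exec_size == 32 the loop above
 * emits two SIMD16 MOVs covering channels 0-15 and 16-31.  Every channel
 * that actually executes passes the 0 == 0 comparison and sets its bit in
 * f1.0, so f1.0 ends up holding the true execution mask, with channels
 * that were never dispatched left at zero.
 */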
3418
3419 /* Find the first bit set in the exec_size-wide portion of the flag
3420 * register that was updated by the last sequence of MOV
3421 * instructions.
3422 */
3423 const enum brw_reg_type type = brw_int_type(exec_size / 8, false);
3424 brw_FBL(p, vec1(dst), byte_offset(retype(flag, type), qtr_control));
3425 }
3426 } else {
3427 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3428
3429 if (devinfo->gen >= 8 &&
3430 mask.file == BRW_IMMEDIATE_VALUE && mask.ud == 0xffffffff) {
3431 /* In SIMD4x2 mode the first active channel index is just the
3432 * negation of the first bit of the mask register. Note that ce0
3433 * doesn't take into account the dispatch mask, so the Gen7 path
3434 * should be used instead unless you have the guarantee that the
3435 * dispatch mask is tightly packed (i.e. it has the form '2^n - 1'
3436 * for some n).
3437 */
3438 inst = brw_AND(p, brw_writemask(dst, WRITEMASK_X),
3439 negate(retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD)),
3440 brw_imm_ud(1));
3441
3442 } else {
3443 /* Overwrite the destination first without and then with execution
3444 * masking to find out which of the channels is active.
3445 */
3446 brw_push_insn_state(p);
3447 brw_set_default_exec_size(p, BRW_EXECUTE_4);
3448 brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
3449 brw_imm_ud(1));
3450
3451 inst = brw_MOV(p, brw_writemask(vec4(dst), WRITEMASK_X),
3452 brw_imm_ud(0));
3453 brw_pop_insn_state(p);
3454 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_ENABLE);
3455 }
3456 }
3457
3458 brw_pop_insn_state(p);
3459 }
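/* Illustrative sketch (not part of the original file): compute the first
 * live channel index into a hypothetical scratch register, trusting ce0
 * alone by passing an all-ones mask.
 */
static void
example_find_live_channel(struct brw_codegen *p)
{
   brw_find_live_channel(p, retype(brw_vec1_grf(10, 0), BRW_REGISTER_TYPE_UD),
                         brw_imm_ud(0xffffffff));
}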
3460
3461 void
3462 brw_broadcast(struct brw_codegen *p,
3463 struct brw_reg dst,
3464 struct brw_reg src,
3465 struct brw_reg idx)
3466 {
3467 const struct gen_device_info *devinfo = p->devinfo;
3468 const bool align1 = brw_inst_access_mode(devinfo, p->current) == BRW_ALIGN_1;
3469 brw_inst *inst;
3470
3471 brw_push_insn_state(p);
3472 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3473 brw_set_default_exec_size(p, align1 ? BRW_EXECUTE_1 : BRW_EXECUTE_4);
3474
3475 assert(src.file == BRW_GENERAL_REGISTER_FILE &&
3476 src.address_mode == BRW_ADDRESS_DIRECT);
3477
3478 if ((src.vstride == 0 && (src.hstride == 0 || !align1)) ||
3479 idx.file == BRW_IMMEDIATE_VALUE) {
3480 /* Trivial case: the source is already uniform or the index is a constant.
3481 * We will typically not get here if the optimizer is doing its job, but
3482 * asserting would be mean.
3483 */
3484 const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
3485 brw_MOV(p, dst,
3486 (align1 ? stride(suboffset(src, i), 0, 1, 0) :
3487 stride(suboffset(src, 4 * i), 0, 4, 1)));
3488 } else {
3489 if (align1) {
3490 const struct brw_reg addr =
3491 retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
3492 const unsigned offset = src.nr * REG_SIZE + src.subnr;
3493 /* Limit in bytes of the signed indirect addressing immediate. */
3494 const unsigned limit = 512;
3495
3496 brw_push_insn_state(p);
3497 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3498 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
3499
3500 /* Take into account the component size and horizontal stride. */
3501 assert(src.vstride == src.hstride + src.width);
3502 brw_SHL(p, addr, vec1(idx),
3503 brw_imm_ud(_mesa_logbase2(type_sz(src.type)) +
3504 src.hstride - 1));
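/* Worked example (illustration only): for a 32-bit component type
 * (type_sz == 4, log2 == 2) with an encoded hstride of 1 (consecutive
 * elements), the shift amount is 2 + 1 - 1 == 2, so addr == idx * 4 bytes.
 */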
3505
3506 /* We can only address up to limit bytes using the indirect
3507 * addressing immediate; account for the difference if the source
3508 * register starts above this limit.
3509 */
3510 if (offset >= limit)
3511 brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
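/* Example (illustration only): offset == 600 adds 512 to the address
 * register here and leaves 600 % 512 == 88 for the addressing immediate
 * below, keeping it inside the signed 512-byte window.
 */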
3512
3513 brw_pop_insn_state(p);
3514
3515 /* Use indirect addressing to fetch the specified component. */
3516 brw_MOV(p, dst,
3517 retype(brw_vec1_indirect(addr.subnr, offset % limit),
3518 src.type));
3519 } else {
3520 /* In SIMD4x2 mode the index can be either zero or one; replicate it
3521 * to all bits of a flag register,
3522 */
3523 inst = brw_MOV(p,
3524 brw_null_reg(),
3525 stride(brw_swizzle(idx, BRW_SWIZZLE_XXXX), 4, 4, 1));
3526 brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NONE);
3527 brw_inst_set_cond_modifier(devinfo, inst, BRW_CONDITIONAL_NZ);
3528 brw_inst_set_flag_reg_nr(devinfo, inst, 1);
3529
3530 /* and use predicated SEL to pick the right channel. */
3531 inst = brw_SEL(p, dst,
3532 stride(suboffset(src, 4), 4, 4, 1),
3533 stride(src, 4, 4, 1));
3534 brw_inst_set_pred_control(devinfo, inst, BRW_PREDICATE_NORMAL);
3535 brw_inst_set_flag_reg_nr(devinfo, inst, 1);
3536 }
3537 }
3538
3539 brw_pop_insn_state(p);
3540 }
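/* Illustrative sketch (not part of the original file): broadcast the
 * component of g10 selected by the index in g12.0 to all channels of g11.
 * All three registers are hypothetical.
 */
static void
example_broadcast(struct brw_codegen *p)
{
   brw_broadcast(p, brw_vec8_grf(11, 0), brw_vec8_grf(10, 0),
                 retype(brw_vec1_grf(12, 0), BRW_REGISTER_TYPE_UD));
}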
3541
3542 /**
3543 * This instruction is generated as a single-channel align1 instruction by
3544 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
3545 *
3546 * We can't use the typed atomic op in the FS because that has the execution
3547 * mask ANDed with the pixel mask, but we just want to write the one dword for
3548 * all the pixels.
3549 *
3550 * We don't use the SIMD4x2 atomic ops in the VS because we just want to
3551 * write one u32. So we use the same untyped atomic write message as the
3552 * pixel shader.
3553 *
3554 * The untyped atomic operation requires a BUFFER surface type with RAW
3555 * format, and is only accessible through the legacy DATA_CACHE dataport
3556 * messages.
3557 */
3558 void brw_shader_time_add(struct brw_codegen *p,
3559 struct brw_reg payload,
3560 uint32_t surf_index)
3561 {
3562 const unsigned sfid = (p->devinfo->gen >= 8 || p->devinfo->is_haswell ?
3563 HSW_SFID_DATAPORT_DATA_CACHE_1 :
3564 GEN7_SFID_DATAPORT_DATA_CACHE);
3565 assert(p->devinfo->gen >= 7);
3566
3567 brw_push_insn_state(p);
3568 brw_set_default_access_mode(p, BRW_ALIGN_1);
3569 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
3570 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
3571 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
3572
3573 /* We use brw_vec1_reg and disable execution masking because we want to
3574 * increment the given offset only once.
3575 */
3576 brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
3577 BRW_ARF_NULL, 0));
3578 brw_set_src0(p, send, brw_vec1_reg(payload.file,
3579 payload.nr, 0));
3580 brw_set_src1(p, send, brw_imm_ud(0));
3581 brw_set_message_descriptor(p, send, sfid, 2, 0, false, false);
3582 brw_inst_set_binding_table_index(p->devinfo, send, surf_index);
3583 brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, false);
3584
3585 brw_pop_insn_state(p);
3586 }
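/* Illustrative sketch (not part of the original file): accumulate a time
 * delta held in a hypothetical payload register into the shader-time
 * buffer bound at binding-table index 0.
 */
static void
example_shader_time(struct brw_codegen *p)
{
   brw_shader_time_add(p, brw_vec8_grf(100, 0), 0 /* surf_index */);
}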
3587
3588
3589 /**
3590 * Emit the SEND message for a barrier
3591 */
3592 void
3593 brw_barrier(struct brw_codegen *p, struct brw_reg src)
3594 {
3595 const struct gen_device_info *devinfo = p->devinfo;
3596 struct brw_inst *inst;
3597
3598 assert(devinfo->gen >= 7);
3599
3600 brw_push_insn_state(p);
3601 brw_set_default_access_mode(p, BRW_ALIGN_1);
3602 inst = next_insn(p, BRW_OPCODE_SEND);
3603 brw_set_dest(p, inst, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
3604 brw_set_src0(p, inst, src);
3605 brw_set_src1(p, inst, brw_null_reg());
3606
3607 brw_set_message_descriptor(p, inst, BRW_SFID_MESSAGE_GATEWAY,
3608 1 /* msg_length */,
3609 0 /* response_length */,
3610 false /* header_present */,
3611 false /* end_of_thread */);
3612
3613 brw_inst_set_gateway_notify(devinfo, inst, 1);
3614 brw_inst_set_gateway_subfuncid(devinfo, inst,
3615 BRW_MESSAGE_GATEWAY_SFID_BARRIER_MSG);
3616
3617 brw_inst_set_mask_control(devinfo, inst, BRW_MASK_DISABLE);
3618 brw_pop_insn_state(p);
3619 }
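/* Illustrative sketch (not part of the original file): signal arrival at
 * the barrier and then stall until the whole thread group has arrived,
 * using brw_WAIT() below.  The payload register is hypothetical; the
 * barrier ID is assumed to already be in its header.
 */
static void
example_barrier_sync(struct brw_codegen *p)
{
   brw_barrier(p, brw_vec8_grf(1, 0));
   brw_WAIT(p);
}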
3620
3621
3622 /**
3623 * Emit the wait instruction for a barrier
3624 */
3625 void
3626 brw_WAIT(struct brw_codegen *p)
3627 {
3628 const struct gen_device_info *devinfo = p->devinfo;
3629 struct brw_inst *insn;
3630
3631 struct brw_reg src = brw_notification_reg();
3632
3633 insn = next_insn(p, BRW_OPCODE_WAIT);
3634 brw_set_dest(p, insn, src);
3635 brw_set_src0(p, insn, src);
3636 brw_set_src1(p, insn, brw_null_reg());
3637
3638 brw_inst_set_exec_size(devinfo, insn, BRW_EXECUTE_1);
3639 brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
3640 }