src/mesa/drivers/dri/i965/brw_eu_emit.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_context.h"
#include "brw_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/***********************************************************************
 * Internal helper for constructing instructions
 */

static void guess_execution_size(struct brw_compile *p,
                                 brw_inst *insn,
                                 struct brw_reg reg)
{
   const struct brw_context *brw = p->brw;

   if (reg.width == BRW_WIDTH_8 && p->compressed) {
      brw_inst_set_exec_size(brw, insn, BRW_EXECUTE_16);
   } else {
      /* Register width definitions are compatible with BRW_EXECUTE_* enums. */
      brw_inst_set_exec_size(brw, insn, reg.width);
   }
}
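
/* Illustrative mapping, assuming the BRW_WIDTH_* and BRW_EXECUTE_* encodings
 * stay in lockstep (which the comment above relies on):
 *
 *    reg.width          p->compressed   exec size written
 *    BRW_WIDTH_4  (2)   any             BRW_EXECUTE_4  (2)
 *    BRW_WIDTH_8  (3)   false           BRW_EXECUTE_8  (3)
 *    BRW_WIDTH_8  (3)   true            BRW_EXECUTE_16 (4)
 */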


/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_compile *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   struct brw_context *brw = p->brw;
   if (brw->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
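
/* A minimal usage sketch (hypothetical register and MRF numbers): before
 * emitting a SEND whose payload currently lives in a GRF, resolve it into
 * the message register file first:
 *
 *    struct brw_reg payload = brw_vec8_grf(12, 0);
 *    gen6_resolve_implied_move(p, &payload, 2);
 *    brw_set_src0(p, send_inst, payload);
 *
 * On Gen6+ this emits "mov m2, g12" and rewrites payload to m2; on earlier
 * gens it is a no-op and the hardware performs the move implicitly.
 */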

static void
gen7_convert_mrf_to_grf(struct brw_compile *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   struct brw_context *brw = p->brw;
   if (brw->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
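
/* For example, with GEN7_MRF_HACK_START == 112 (matching the R112-R127 range
 * quoted above), an incoming m4 (BRW_MESSAGE_REGISTER_FILE, nr 4) leaves this
 * function as g116 (BRW_GENERAL_REGISTER_FILE, nr 116) on Gen7+.
 */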

/**
 * Convert a brw_reg_type enumeration value into the hardware representation.
 *
 * The hardware encoding may depend on whether the value is an immediate.
 */
unsigned
brw_reg_type_to_hw_type(const struct brw_context *brw,
                        enum brw_reg_type type, unsigned file)
{
   if (file == BRW_IMMEDIATE_VALUE) {
      static const int imm_hw_types[] = {
         [BRW_REGISTER_TYPE_UD] = BRW_HW_REG_TYPE_UD,
         [BRW_REGISTER_TYPE_D]  = BRW_HW_REG_TYPE_D,
         [BRW_REGISTER_TYPE_UW] = BRW_HW_REG_TYPE_UW,
         [BRW_REGISTER_TYPE_W]  = BRW_HW_REG_TYPE_W,
         [BRW_REGISTER_TYPE_F]  = BRW_HW_REG_TYPE_F,
         [BRW_REGISTER_TYPE_UB] = -1,
         [BRW_REGISTER_TYPE_B]  = -1,
         [BRW_REGISTER_TYPE_UV] = BRW_HW_REG_IMM_TYPE_UV,
         [BRW_REGISTER_TYPE_VF] = BRW_HW_REG_IMM_TYPE_VF,
         [BRW_REGISTER_TYPE_V]  = BRW_HW_REG_IMM_TYPE_V,
         [BRW_REGISTER_TYPE_DF] = GEN8_HW_REG_IMM_TYPE_DF,
         [BRW_REGISTER_TYPE_HF] = GEN8_HW_REG_IMM_TYPE_HF,
         [BRW_REGISTER_TYPE_UQ] = GEN8_HW_REG_TYPE_UQ,
         [BRW_REGISTER_TYPE_Q]  = GEN8_HW_REG_TYPE_Q,
      };
      assert(type < ARRAY_SIZE(imm_hw_types));
      assert(imm_hw_types[type] != -1);
      assert(brw->gen >= 8 || type < BRW_REGISTER_TYPE_DF);
      return imm_hw_types[type];
   } else {
      /* Non-immediate registers */
      static const int hw_types[] = {
         [BRW_REGISTER_TYPE_UD] = BRW_HW_REG_TYPE_UD,
         [BRW_REGISTER_TYPE_D]  = BRW_HW_REG_TYPE_D,
         [BRW_REGISTER_TYPE_UW] = BRW_HW_REG_TYPE_UW,
         [BRW_REGISTER_TYPE_W]  = BRW_HW_REG_TYPE_W,
         [BRW_REGISTER_TYPE_UB] = BRW_HW_REG_NON_IMM_TYPE_UB,
         [BRW_REGISTER_TYPE_B]  = BRW_HW_REG_NON_IMM_TYPE_B,
         [BRW_REGISTER_TYPE_F]  = BRW_HW_REG_TYPE_F,
         [BRW_REGISTER_TYPE_UV] = -1,
         [BRW_REGISTER_TYPE_VF] = -1,
         [BRW_REGISTER_TYPE_V]  = -1,
         [BRW_REGISTER_TYPE_DF] = GEN7_HW_REG_NON_IMM_TYPE_DF,
         [BRW_REGISTER_TYPE_HF] = GEN8_HW_REG_NON_IMM_TYPE_HF,
         [BRW_REGISTER_TYPE_UQ] = GEN8_HW_REG_TYPE_UQ,
         [BRW_REGISTER_TYPE_Q]  = GEN8_HW_REG_TYPE_Q,
      };
      assert(type < ARRAY_SIZE(hw_types));
      assert(hw_types[type] != -1);
      assert(brw->gen >= 7 || type < BRW_REGISTER_TYPE_DF);
      assert(brw->gen >= 8 || type < BRW_REGISTER_TYPE_HF);
      return hw_types[type];
   }
}
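
/* Example: brw_reg_type_to_hw_type(brw, BRW_REGISTER_TYPE_F,
 * BRW_GENERAL_REGISTER_FILE) returns BRW_HW_REG_TYPE_F, per the second table
 * above.  The vector immediate types (UV, VF, V) are only valid as
 * immediates, and the byte types (UB, B) only as registers; the -1 entries
 * plus the asserts catch any such mismatch.
 */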

void
brw_set_dest(struct brw_compile *p, brw_inst *inst, struct brw_reg dest)
{
   const struct brw_context *brw = p->brw;

   if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE &&
       dest.file != BRW_MESSAGE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_reg_file(brw, inst, dest.file);
   brw_inst_set_dst_reg_type(brw, inst, brw_reg_type_to_hw_type(brw, dest.type,
                                                                dest.file));
   brw_inst_set_dst_address_mode(brw, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(brw, inst, dest.nr);

      if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(brw, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(brw, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(brw, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(brw, inst, dest.dw1.bits.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.dw1.bits.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          *    Although Dst.HorzStride is a don't care for Align16, HW needs
          *    this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(brw, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(brw, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(brw, inst,
                                       dest.dw1.bits.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(brw, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(brw, inst,
                                        dest.dw1.bits.indirect_offset);
         /* The horizontal stride is ignored in DA16 mode, but the hardware
          * still requires it to be programmed as "01".
          */
         brw_inst_set_dst_hstride(brw, inst, 1);
      }
   }

   /* Set the execution size based on dest.width and
    * inst->compression_control:
    */
   guess_execution_size(p, inst, dest);
}

extern int reg_type_size[];

static void
validate_reg(const struct brw_context *brw, brw_inst *inst, struct brw_reg reg)
{
   int hstride_for_reg[] = {0, 1, 2, 4};
   int vstride_for_reg[] = {0, 1, 2, 4, 8, 16, 32, 64, 128, 256};
   int width_for_reg[] = {1, 2, 4, 8, 16};
   int execsize_for_reg[] = {1, 2, 4, 8, 16};
   int width, hstride, vstride, execsize;

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* 3.3.6: Region Parameters.  Restriction: Immediate vectors
       * mean the destination has to be 128-bit aligned and the
       * destination horiz stride has to be a word.
       */
      if (reg.type == BRW_REGISTER_TYPE_V) {
         assert(hstride_for_reg[brw_inst_dst_hstride(brw, inst)] *
                reg_type_size[brw_inst_dst_reg_type(brw, inst)] == 2);
      }

      return;
   }

   if (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
       reg.nr == BRW_ARF_NULL)
      return;

   assert(reg.hstride >= 0 && reg.hstride < Elements(hstride_for_reg));
   hstride = hstride_for_reg[reg.hstride];

   if (reg.vstride == 0xf) {
      vstride = -1;
   } else {
      assert(reg.vstride >= 0 && reg.vstride < Elements(vstride_for_reg));
      vstride = vstride_for_reg[reg.vstride];
   }

   assert(reg.width >= 0 && reg.width < Elements(width_for_reg));
   width = width_for_reg[reg.width];

   assert(brw_inst_exec_size(brw, inst) >= 0 &&
          brw_inst_exec_size(brw, inst) < Elements(execsize_for_reg));
   execsize = execsize_for_reg[brw_inst_exec_size(brw, inst)];

   /* Restrictions from 3.3.10: Register Region Restrictions. */
   /* 3. */
   assert(execsize >= width);

   /* 4. */
   if (execsize == width && hstride != 0) {
      assert(vstride == -1 || vstride == width * hstride);
   }

   /* 5. */
   if (execsize == width && hstride == 0) {
      /* no restriction on vstride. */
   }

   /* 6. */
   if (width == 1) {
      assert(hstride == 0);
   }

   /* 7. */
   if (execsize == 1 && width == 1) {
      assert(hstride == 0);
      assert(vstride == 0);
   }

   /* 8. */
   if (vstride == 0 && hstride == 0) {
      assert(width == 1);
   }

   /* 10. Check destination issues. */
}

static bool
is_compactable_immediate(unsigned imm)
{
   /* We get the low 12 bits as-is. */
   imm &= ~0xfff;

   /* We get one bit replicated through the top 20 bits. */
   return imm == 0 || imm == 0xfffff000;
}
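
/* For illustration (hypothetical values, not from the driver):
 *
 *    is_compactable_immediate(0x00000fff) == true    only the low 12 bits set
 *    is_compactable_immediate(0xfffff800) == true    sign bit replicated above
 *    is_compactable_immediate(0x00001000) == false   bit 12 set, upper bits clear
 */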

void
brw_set_src0(struct brw_compile *p, brw_inst *inst, struct brw_reg reg)
{
   struct brw_context *brw = p->brw;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (brw->gen >= 6 && (brw_inst_opcode(brw, inst) == BRW_OPCODE_SEND ||
                         brw_inst_opcode(brw, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   validate_reg(brw, inst, reg);

   brw_inst_set_src0_reg_file(brw, inst, reg.file);
   brw_inst_set_src0_reg_type(brw, inst,
                              brw_reg_type_to_hw_type(brw, reg.type, reg.file));
   brw_inst_set_src0_abs(brw, inst, reg.abs);
   brw_inst_set_src0_negate(brw, inst, reg.negate);
   brw_inst_set_src0_address_mode(brw, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      brw_inst_set_imm_ud(brw, inst, reg.dw1.ud);

      /* The Bspec's section titled "Non-present Operands" claims that if src0
       * is an immediate that src1's type must be the same as that of src0.
       *
       * The SNB+ DataTypeIndex instruction compaction tables contain mappings
       * that do not follow this rule.  E.g., from the IVB/HSW table:
       *
       *  DataTypeIndex   18-Bit Mapping       Mapped Meaning
       *        3         001000001011111101   r:f | i:vf | a:ud | <1> | dir |
       *
       * And from the SNB table:
       *
       *  DataTypeIndex   18-Bit Mapping       Mapped Meaning
       *        8         001000000111101100   a:w | i:w | a:ud | <1> | dir |
       *
       * Neither of these cause warnings from the simulator when used,
       * compacted or otherwise.  In fact, all compaction mappings that have an
       * immediate in src0 use a:ud for src1.
       *
       * The GM45 instruction compaction tables do not contain mapped meanings
       * so it's not clear whether it has the restriction.  We'll assume it was
       * lifted on SNB.  (FINISHME: decode the GM45 tables and check.)
       */
      brw_inst_set_src1_reg_file(brw, inst, BRW_ARCHITECTURE_REGISTER_FILE);
      if (brw->gen < 6) {
         brw_inst_set_src1_reg_type(brw, inst,
                                    brw_inst_src0_reg_type(brw, inst));
      } else {
         brw_inst_set_src1_reg_type(brw, inst, BRW_HW_REG_TYPE_UD);
      }

      /* Compacted instructions only have 12-bits (plus 1 for the other 20)
       * for immediate values.  Presumably the hardware engineers realized
       * that the only useful floating-point value that could be represented
       * in this format is 0.0, which can also be represented as a VF-typed
       * immediate, so they gave us the previously mentioned mapping on IVB+.
       *
       * Strangely, we do have a mapping for imm:f in src1, so we don't need
       * to do this there.
       *
       * If we see a 0.0:F, change the type to VF so that it can be compacted.
       */
      if (brw_inst_imm_ud(brw, inst) == 0x0 &&
          brw_inst_src0_reg_type(brw, inst) == BRW_HW_REG_TYPE_F) {
         brw_inst_set_src0_reg_type(brw, inst, BRW_HW_REG_IMM_TYPE_VF);
      }

      /* There are no mappings for dst:d | i:d, so if the immediate is suitable
       * set the types to :UD so the instruction can be compacted.
       */
      if (is_compactable_immediate(brw_inst_imm_ud(brw, inst)) &&
          brw_inst_cond_modifier(brw, inst) == BRW_CONDITIONAL_NONE &&
          brw_inst_src0_reg_type(brw, inst) == BRW_HW_REG_TYPE_D &&
          brw_inst_dst_reg_type(brw, inst) == BRW_HW_REG_TYPE_D) {
         brw_inst_set_src0_reg_type(brw, inst, BRW_HW_REG_TYPE_UD);
         brw_inst_set_dst_reg_type(brw, inst, BRW_HW_REG_TYPE_UD);
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(brw, inst, reg.nr);
         if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(brw, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(brw, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(brw, inst, reg.subnr);

         if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(brw, inst, reg.dw1.bits.indirect_offset);
         } else {
            brw_inst_set_src0_ia_subreg_nr(brw, inst, reg.dw1.bits.indirect_offset);
         }
      }

      if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(brw, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(brw, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(brw, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(brw, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(brw, inst, reg.hstride);
            brw_inst_set_src0_width(brw, inst, reg.width);
            brw_inst_set_src0_vstride(brw, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_W));

         /* This is an oddity of the fact we're using the same
          * descriptions for registers in align_16 as align_1:
          */
         if (reg.vstride == BRW_VERTICAL_STRIDE_8)
            brw_inst_set_src0_vstride(brw, inst, BRW_VERTICAL_STRIDE_4);
         else
            brw_inst_set_src0_vstride(brw, inst, reg.vstride);
      }
   }
}


void
brw_set_src1(struct brw_compile *p, brw_inst *inst, struct brw_reg reg)
{
   const struct brw_context *brw = p->brw;
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   validate_reg(brw, inst, reg);

   brw_inst_set_src1_reg_file(brw, inst, reg.file);
   brw_inst_set_src1_reg_type(brw, inst,
                              brw_reg_type_to_hw_type(brw, reg.type, reg.file));
   brw_inst_set_src1_abs(brw, inst, reg.abs);
   brw_inst_set_src1_negate(brw, inst, reg.negate);

   /* Only src1 can be an immediate in two-argument instructions. */
   assert(brw_inst_src0_reg_file(brw, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      brw_inst_set_imm_ud(brw, inst, reg.dw1.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(brw, inst, reg.nr);
      if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(brw, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(brw, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(brw, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(brw, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(brw, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(brw, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(brw, inst, reg.hstride);
            brw_inst_set_src1_width(brw, inst, reg.width);
            brw_inst_set_src1_vstride(brw, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_W));

         /* This is an oddity of the fact we're using the same
          * descriptions for registers in align_16 as align_1:
          */
         if (reg.vstride == BRW_VERTICAL_STRIDE_8)
            brw_inst_set_src1_vstride(brw, inst, BRW_VERTICAL_STRIDE_4);
         else
            brw_inst_set_src1_vstride(brw, inst, reg.vstride);
      }
   }
}

/**
 * Set the Message Descriptor and Extended Message Descriptor fields
 * for SEND messages.
 *
 * \note This zeroes out the Function Control bits, so it must be called
 *       \b before filling out any message-specific data.  Callers can
 *       choose not to fill in irrelevant bits; they will be zero.
 */
static void
brw_set_message_descriptor(struct brw_compile *p,
                           brw_inst *inst,
                           enum brw_message_target sfid,
                           unsigned msg_length,
                           unsigned response_length,
                           bool header_present,
                           bool end_of_thread)
{
   struct brw_context *brw = p->brw;

   brw_set_src1(p, inst, brw_imm_d(0));
   brw_inst_set_sfid(brw, inst, sfid);
   brw_inst_set_mlen(brw, inst, msg_length);
   brw_inst_set_rlen(brw, inst, response_length);
   brw_inst_set_eot(brw, inst, end_of_thread);

   if (brw->gen >= 5) {
      brw_inst_set_header_present(brw, inst, header_present);
   }
}
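
/* A typical call, sketched from the users below (hypothetical lengths):
 * a message with a two-register payload (mlen), a header, a four-register
 * response (rlen), and no EOT would be set up as
 *
 *    brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER, 2, 4, true, false);
 *
 * after which the caller fills in the message-specific control bits.
 */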

static void brw_set_math_message(struct brw_compile *p,
                                 brw_inst *inst,
                                 unsigned function,
                                 unsigned integer_type,
                                 bool low_precision,
                                 unsigned dataType)
{
   struct brw_context *brw = p->brw;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_message_descriptor(p, inst, BRW_SFID_MATH,
                              msg_length, response_length, false, false);
   brw_inst_set_math_msg_function(brw, inst, function);
   brw_inst_set_math_msg_signed_int(brw, inst, integer_type);
   brw_inst_set_math_msg_precision(brw, inst, low_precision);
   brw_inst_set_math_msg_saturate(brw, inst, brw_inst_saturate(brw, inst));
   brw_inst_set_math_msg_data_type(brw, inst, dataType);
   brw_inst_set_saturate(brw, inst, 0);
}
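
/* For instance, per the two switches above, BRW_MATH_FUNCTION_POW reads two
 * source registers and writes one (mlen 2, rlen 1), while
 * BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER is mlen 2, rlen 2.
 */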


static void brw_set_ff_sync_message(struct brw_compile *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct brw_context *brw = p->brw;

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              1, response_length, true, end_of_thread);
   brw_inst_set_urb_opcode(brw, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(brw, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(brw, insn, 0);
   brw_inst_set_urb_swizzle_control(brw, insn, 0);
   brw_inst_set_urb_used(brw, insn, 0);
   brw_inst_set_urb_complete(brw, insn, 0);
}

static void brw_set_urb_message(struct brw_compile *p,
                                brw_inst *insn,
                                enum brw_urb_write_flags flags,
                                unsigned msg_length,
                                unsigned response_length,
                                unsigned offset,
                                unsigned swizzle_control)
{
   struct brw_context *brw = p->brw;

   assert(brw->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(brw->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(brw->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              msg_length, response_length, true,
                              flags & BRW_URB_WRITE_EOT);

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(brw, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(brw, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(brw, insn, offset);
   brw_inst_set_urb_swizzle_control(brw, insn, swizzle_control);

   if (brw->gen < 8) {
      brw_inst_set_urb_complete(brw, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (brw->gen < 7) {
      brw_inst_set_urb_allocate(brw, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(brw, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(brw, insn,
                                       !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

void
brw_set_dp_write_message(struct brw_compile *p,
                         brw_inst *insn,
                         unsigned binding_table_index,
                         unsigned msg_control,
                         unsigned msg_type,
                         unsigned msg_length,
                         bool header_present,
                         unsigned last_render_target,
                         unsigned response_length,
                         unsigned end_of_thread,
                         unsigned send_commit_msg)
{
   struct brw_context *brw = p->brw;
   unsigned sfid;

   if (brw->gen >= 7) {
      /* Use the Render Cache for RT writes; otherwise use the Data Cache */
      if (msg_type == GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE)
         sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
      else
         sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
   } else if (brw->gen == 6) {
      /* Use the render cache for all write messages. */
      sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
   } else {
      sfid = BRW_SFID_DATAPORT_WRITE;
   }

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, end_of_thread);

   brw_inst_set_binding_table_index(brw, insn, binding_table_index);
   brw_inst_set_dp_write_msg_type(brw, insn, msg_type);
   brw_inst_set_dp_write_msg_control(brw, insn, msg_control);
   brw_inst_set_rt_last(brw, insn, last_render_target);
   if (brw->gen < 7) {
      brw_inst_set_dp_write_commit(brw, insn, send_commit_msg);
   }
}

void
brw_set_dp_read_message(struct brw_compile *p,
                        brw_inst *insn,
                        unsigned binding_table_index,
                        unsigned msg_control,
                        unsigned msg_type,
                        unsigned target_cache,
                        unsigned msg_length,
                        bool header_present,
                        unsigned response_length)
{
   struct brw_context *brw = p->brw;
   unsigned sfid;

   if (brw->gen >= 7) {
      sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
   } else if (brw->gen == 6) {
      if (target_cache == BRW_DATAPORT_READ_TARGET_RENDER_CACHE)
         sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
      else
         sfid = GEN6_SFID_DATAPORT_SAMPLER_CACHE;
   } else {
      sfid = BRW_SFID_DATAPORT_READ;
   }

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, false);

   brw_inst_set_binding_table_index(brw, insn, binding_table_index);
   brw_inst_set_dp_read_msg_type(brw, insn, msg_type);
   brw_inst_set_dp_read_msg_control(brw, insn, msg_control);
   if (brw->gen < 6)
      brw_inst_set_dp_read_target_cache(brw, insn, target_cache);
}

void
brw_set_sampler_message(struct brw_compile *p,
                        brw_inst *inst,
                        unsigned binding_table_index,
                        unsigned sampler,
                        unsigned msg_type,
                        unsigned response_length,
                        unsigned msg_length,
                        unsigned header_present,
                        unsigned simd_mode,
                        unsigned return_format)
{
   struct brw_context *brw = p->brw;

   brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER, msg_length,
                              response_length, header_present, false);

   brw_inst_set_binding_table_index(brw, inst, binding_table_index);
   brw_inst_set_sampler(brw, inst, sampler);
   brw_inst_set_sampler_msg_type(brw, inst, msg_type);
   if (brw->gen >= 5) {
      brw_inst_set_sampler_simd_mode(brw, inst, simd_mode);
   } else if (brw->gen == 4 && !brw->is_g4x) {
      brw_inst_set_sampler_return_format(brw, inst, return_format);
   }
}

static void
gen7_set_dp_scratch_message(struct brw_compile *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct brw_context *brw = p->brw;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (brw->gen >= 8 && num_regs == 8));
   brw_set_message_descriptor(p, inst, GEN7_SFID_DATAPORT_DATA_CACHE,
                              mlen, rlen, header_present, false);
   brw_inst_set_dp_category(brw, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(brw, inst, write);
   brw_inst_set_scratch_type(brw, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(brw, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(brw, inst, ffs(num_regs) - 1);
   brw_inst_set_scratch_addr_offset(brw, inst, addr_offset);
}
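
/* The block size field is the log2 of the register count: ffs(num_regs) - 1
 * maps 1 -> 0, 2 -> 1, 4 -> 2, and (Gen8+) 8 -> 3, matching the encoding the
 * assert above permits.
 */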

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_compile *p, unsigned opcode)
{
   const struct brw_context *brw = p->brw;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];
   memcpy(insn, p->current, sizeof(*insn));

   brw_inst_set_opcode(brw, insn, opcode);
   return insn;
}
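
/* Note that the reralloc above may move p->store, so brw_inst pointers must
 * not be held across a call to next_insn(); this is why the if/loop stacks
 * below record instruction indices rather than pointers.
 */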

static brw_inst *
brw_alu1(struct brw_compile *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_compile *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   if (reg.vstride == BRW_VERTICAL_STRIDE_0) {
      assert(brw_is_single_value_swizzle(reg.dw1.bits.swizzle));
      return reg.subnr / 4 + BRW_GET_SWZ(reg.dw1.bits.swizzle, 0);
   } else {
      return reg.subnr / 4;
   }
}
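
/* Example: a scalar (vstride 0) float source at g10.8 with a .zzzz swizzle
 * yields 8 / 4 + 2 == 4, i.e. the 3-src encoding addresses the replicated
 * component directly instead of carrying a swizzle.
 */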

static brw_inst *
brw_alu3(struct brw_compile *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   struct brw_context *brw = p->brw;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(brw_inst_access_mode(brw, inst) == BRW_ALIGN_16);

   assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
          dest.file == BRW_MESSAGE_REGISTER_FILE);
   assert(dest.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(dest.type == BRW_REGISTER_TYPE_F ||
          dest.type == BRW_REGISTER_TYPE_D ||
          dest.type == BRW_REGISTER_TYPE_UD);
   if (brw->gen == 6) {
      brw_inst_set_3src_dst_reg_file(brw, inst,
                                     dest.file == BRW_MESSAGE_REGISTER_FILE);
   }
   brw_inst_set_3src_dst_reg_nr(brw, inst, dest.nr);
   brw_inst_set_3src_dst_subreg_nr(brw, inst, dest.subnr / 16);
   brw_inst_set_3src_dst_writemask(brw, inst, dest.dw1.bits.writemask);
   guess_execution_size(p, inst, dest);

   assert(src0.file == BRW_GENERAL_REGISTER_FILE);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.nr < 128);
   brw_inst_set_3src_src0_swizzle(brw, inst, src0.dw1.bits.swizzle);
   brw_inst_set_3src_src0_subreg_nr(brw, inst, get_3src_subreg_nr(src0));
   brw_inst_set_3src_src0_reg_nr(brw, inst, src0.nr);
   brw_inst_set_3src_src0_abs(brw, inst, src0.abs);
   brw_inst_set_3src_src0_negate(brw, inst, src0.negate);
   brw_inst_set_3src_src0_rep_ctrl(brw, inst,
                                   src0.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src1.file == BRW_GENERAL_REGISTER_FILE);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.nr < 128);
   brw_inst_set_3src_src1_swizzle(brw, inst, src1.dw1.bits.swizzle);
   brw_inst_set_3src_src1_subreg_nr(brw, inst, get_3src_subreg_nr(src1));
   brw_inst_set_3src_src1_reg_nr(brw, inst, src1.nr);
   brw_inst_set_3src_src1_abs(brw, inst, src1.abs);
   brw_inst_set_3src_src1_negate(brw, inst, src1.negate);
   brw_inst_set_3src_src1_rep_ctrl(brw, inst,
                                   src1.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src2.file == BRW_GENERAL_REGISTER_FILE);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.nr < 128);
   brw_inst_set_3src_src2_swizzle(brw, inst, src2.dw1.bits.swizzle);
   brw_inst_set_3src_src2_subreg_nr(brw, inst, get_3src_subreg_nr(src2));
   brw_inst_set_3src_src2_reg_nr(brw, inst, src2.nr);
   brw_inst_set_3src_src2_abs(brw, inst, src2.abs);
   brw_inst_set_3src_src2_negate(brw, inst, src2.negate);
   brw_inst_set_3src_src2_rep_ctrl(brw, inst,
                                   src2.vstride == BRW_VERTICAL_STRIDE_0);

   if (brw->gen >= 7) {
      /* Set both the source and destination types based on dest.type,
       * ignoring the source register types.  The MAD and LRP emitters ensure
       * that all four types are float.  The BFE and BFI2 emitters, however,
       * may send us mixed D and UD types and want us to ignore that and use
       * the destination type.
       */
      switch (dest.type) {
      case BRW_REGISTER_TYPE_F:
         brw_inst_set_3src_src_type(brw, inst, BRW_3SRC_TYPE_F);
         brw_inst_set_3src_dst_type(brw, inst, BRW_3SRC_TYPE_F);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_inst_set_3src_src_type(brw, inst, BRW_3SRC_TYPE_D);
         brw_inst_set_3src_dst_type(brw, inst, BRW_3SRC_TYPE_D);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_inst_set_3src_src_type(brw, inst, BRW_3SRC_TYPE_UD);
         brw_inst_set_3src_dst_type(brw, inst, BRW_3SRC_TYPE_UD);
         break;
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                          \
brw_inst *brw_##OP(struct brw_compile *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0)                   \
{                                                         \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);       \
}

#define ALU2(OP)                                          \
brw_inst *brw_##OP(struct brw_compile *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0,                   \
                   struct brw_reg src1)                   \
{                                                         \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1); \
}

#define ALU3(OP)                                                \
brw_inst *brw_##OP(struct brw_compile *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

#define ALU3F(OP)                                               \
brw_inst *brw_##OP(struct brw_compile *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   assert(dest.type == BRW_REGISTER_TYPE_F);                    \
   assert(src0.type == BRW_REGISTER_TYPE_F);                    \
   assert(src1.type == BRW_REGISTER_TYPE_F);                    \
   assert(src2.type == BRW_REGISTER_TYPE_F);                    \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                \
void brw_##OP(struct brw_compile *p,                             \
              struct brw_reg dest,                               \
              struct brw_reg src)                                \
{                                                                \
   struct brw_context *brw = p->brw;                             \
   brw_inst *rnd, *add;                                          \
   rnd = next_insn(p, BRW_OPCODE_##OP);                          \
   brw_set_dest(p, rnd, dest);                                   \
   brw_set_src0(p, rnd, src);                                    \
                                                                 \
   if (brw->gen < 6) {                                           \
      /* turn on round-increments */                             \
      brw_inst_set_cond_modifier(brw, rnd, BRW_CONDITIONAL_R);   \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));             \
      brw_inst_set_pred_control(brw, add, BRW_PREDICATE_NORMAL); \
   }                                                             \
}
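
/* For example, brw_RNDZ(p, dest, src) on Gen4/5 expands to roughly
 *
 *    rndz.r  dest, src           (sets the per-channel increment flag)
 *    (+f0.0) add dest, dest, 1.0
 *
 * while on Gen6+ only the rndz instruction itself is emitted.
 */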


ALU1(MOV)
ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU2(ASR)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU2(LINE)
ALU2(PLN)
ALU3F(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)


brw_inst *
brw_ADD(struct brw_compile *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_compile *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_compile *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_compile *p, struct brw_reg dst, struct brw_reg src)
{
   const struct brw_context *brw = p->brw;
   bool align16 = brw_inst_access_mode(brw, p->current) == BRW_ALIGN_16;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   if (brw->gen >= 8) {
      if (align16) {
         /* Emulate the Gen7 zeroing bug (see comments in vec4_visitor's
          * emit_pack_half_2x16 method.)
          */
         brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
      }
      return brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(brw->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }
}

brw_inst *
brw_F16TO32(struct brw_compile *p, struct brw_reg dst, struct brw_reg src)
{
   const struct brw_context *brw = p->brw;
   bool align16 = brw_inst_access_mode(brw, p->current) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (brw->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(brw->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_compile *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   brw_set_dest(p, insn, retype(brw_vec4_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_set_src0(p, insn, retype(brw_vec4_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_set_src1(p, insn, brw_imm_ud(0x0));
}


/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_compile *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct brw_context *brw = p->brw;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(brw, inst, BRW_EXECUTE_2);
   brw_inst_set_qtr_control(brw, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(brw, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(brw, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_compile *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_compile *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_compile *p, brw_inst *inst)
{
   if (p->loop_stack_array_size <= (p->loop_stack_depth + 1)) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_compile *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, e.g. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_compile *p, unsigned execute_size)
{
   struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (brw->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (brw->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(brw, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_ud(0));
      brw_inst_set_jip(brw, insn, 0);
      brw_inst_set_uip(brw, insn, 0);
   }

   brw_inst_set_exec_size(brw, insn, execute_size);
   brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(brw, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(brw, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && brw->gen < 6)
      brw_inst_set_thread_control(brw, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_compile *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(brw, insn, p->compressed ? BRW_EXECUTE_16
                                                   : BRW_EXECUTE_8);
   brw_inst_set_gen6_jump_count(brw, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(brw, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(brw, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(brw, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_compile *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct brw_context *brw = p->brw;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(brw, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(brw, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(brw, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(brw, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(brw, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(brw, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(brw, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(brw, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(brw, if_inst, (next_inst - if_inst) * 16);
   }
}

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_compile *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   struct brw_context *brw = p->brw;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (brw->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(brw, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(brw, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(brw);

   assert(brw_inst_opcode(brw, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(brw, endif_inst, brw_inst_exec_size(brw, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (brw->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(brw, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(brw, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(brw, if_inst, 0);
      } else if (brw->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(brw, if_inst, br * (endif_inst - if_inst));
      } else {
         brw_inst_set_uip(brw, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(brw, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(brw, else_inst, brw_inst_exec_size(brw, if_inst));

      /* Patch IF -> ELSE */
      if (brw->gen < 6) {
         brw_inst_set_gen4_jump_count(brw, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(brw, if_inst, 0);
      } else if (brw->gen == 6) {
         brw_inst_set_gen6_jump_count(brw, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (brw->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(brw, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(brw, else_inst, 1);
      } else if (brw->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(brw, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(brw, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(brw, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(brw, else_inst, br * (endif_inst - else_inst));
         if (brw->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(brw, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}
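
/* Worked example (assuming Gen7, where brw_jump_scale() returns 2): with
 * if_inst at instruction n, else_inst at n+3 and endif_inst at n+7, the code
 * above sets IF.JIP = 2 * 4 = 8 (just past the ELSE), IF.UIP = 2 * 7 = 14,
 * and ELSE.JIP = 2 * 4 = 8 (both landing on the ENDIF).
 */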

void
brw_ELSE(struct brw_compile *p)
{
   struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (brw->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (brw->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(brw, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_ud(0));
      brw_inst_set_jip(brw, insn, 0);
      brw_inst_set_uip(brw, insn, 0);
   }

   brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(brw, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && brw->gen < 6)
      brw_inst_set_thread_control(brw, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_compile *p)
{
   struct brw_context *brw = p->brw;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP.  On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
   if (brw->gen < 6 && p->single_program_flow)
      emit_endif = false;

   /*
    * A single next_insn() may change the base address of the instruction
    * store memory (p->store), so call it first, before converting the stored
    * indices back into instruction pointers.
    */
   if (emit_endif)
      insn = next_insn(p, BRW_OPCODE_ENDIF);

   /* Pop the IF and (optional) ELSE instructions from the stack */
   p->if_depth_in_loop[p->loop_stack_depth]--;
   tmp = pop_if_stack(p);
   if (brw_inst_opcode(brw, tmp) == BRW_OPCODE_ELSE) {
      else_inst = tmp;
      tmp = pop_if_stack(p);
   }
   if_inst = tmp;

   if (!emit_endif) {
      /* ENDIF is useless; don't bother emitting it. */
      convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
      return;
   }

   if (brw->gen < 6) {
      brw_set_dest(p, insn, retype(brw_vec4_grf(0, 0), BRW_REGISTER_TYPE_UD));
      brw_set_src0(p, insn, retype(brw_vec4_grf(0, 0), BRW_REGISTER_TYPE_UD));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (brw->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_ud(0));
   }

   brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(brw, insn, BRW_MASK_ENABLE);
   if (brw->gen < 6)
      brw_inst_set_thread_control(brw, insn, BRW_THREAD_SWITCH);

   /* Also pop item off the stack in the endif instruction: */
   if (brw->gen < 6) {
      brw_inst_set_gen4_jump_count(brw, insn, 0);
      brw_inst_set_gen4_pop_count(brw, insn, 1);
   } else if (brw->gen == 6) {
      brw_inst_set_gen6_jump_count(brw, insn, 2);
   } else {
      brw_inst_set_jip(brw, insn, 2);
   }
   patch_IF_ELSE(p, if_inst, else_inst, insn);
}

brw_inst *
brw_BREAK(struct brw_compile *p)
{
   struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_BREAK);
   if (brw->gen >= 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
      brw_inst_set_gen4_pop_count(brw, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(brw, insn, p->compressed ? BRW_EXECUTE_16
                                                   : BRW_EXECUTE_8);

   return insn;
}

brw_inst *
brw_CONT(struct brw_compile *p)
{
   const struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_CONTINUE);
   brw_set_dest(p, insn, brw_ip_reg());
   brw_set_src0(p, insn, brw_ip_reg());
   brw_set_src1(p, insn, brw_imm_d(0x0));

   if (brw->gen < 6) {
      brw_inst_set_gen4_pop_count(brw, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(brw, insn, p->compressed ? BRW_EXECUTE_16
                                                   : BRW_EXECUTE_8);
   return insn;
}

brw_inst *
gen6_HALT(struct brw_compile *p)
{
   const struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_HALT);
   brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */

   if (p->compressed) {
      brw_inst_set_exec_size(brw, insn, BRW_EXECUTE_16);
   } else {
      brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
      brw_inst_set_exec_size(brw, insn, BRW_EXECUTE_8);
   }
   return insn;
}

/* DO/WHILE loop:
 *
 * The DO/WHILE is just an unterminated loop -- break or continue are
 * used for control within the loop.  We have a few ways they can be
 * done.
 *
 * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
 * jip and no DO instruction.
 *
 * For non-uniform control flow pre-gen6, there's a DO instruction to
 * push the mask, and a WHILE to jump back, and BREAK to get out and
 * pop the mask.
 *
 * For gen6, there's no more mask stack, so no need for DO.  WHILE
 * just points back to the first instruction of the loop.
 */
brw_inst *
brw_DO(struct brw_compile *p, unsigned execute_size)
{
   struct brw_context *brw = p->brw;

   if (brw->gen >= 6 || p->single_program_flow) {
      push_loop_stack(p, &p->store[p->nr_insn]);
      return &p->store[p->nr_insn];
   } else {
      brw_inst *insn = next_insn(p, BRW_OPCODE_DO);

      push_loop_stack(p, insn);

      /* Override the defaults for this instruction:
       */
      brw_set_dest(p, insn, brw_null_reg());
      brw_set_src0(p, insn, brw_null_reg());
      brw_set_src1(p, insn, brw_null_reg());

      brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
      brw_inst_set_exec_size(brw, insn, execute_size);
      brw_inst_set_pred_control(brw, insn, BRW_PREDICATE_NONE);

      return insn;
   }
}
1667
1668 /**
1669 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1670 * instruction here.
1671 *
1672 * For gen6+, see brw_set_uip_jip(), which doesn't need to track the loop
1673 * nesting, since it can always just point to the end of the current block or loop.
1674 */
1675 static void
1676 brw_patch_break_cont(struct brw_compile *p, brw_inst *while_inst)
1677 {
1678 struct brw_context *brw = p->brw;
1679 brw_inst *do_inst = get_inner_do_insn(p);
1680 brw_inst *inst;
1681 unsigned br = brw_jump_scale(brw);
1682
1683 assert(brw->gen < 6);
1684
1685 for (inst = while_inst - 1; inst != do_inst; inst--) {
1686 /* A nonzero jump count means that this instruction has already been
1687 * patched, because it belongs to a loop nested inside the one we're
1688 * patching.
1689 */
1690 if (brw_inst_opcode(brw, inst) == BRW_OPCODE_BREAK &&
1691 brw_inst_gen4_jump_count(brw, inst) == 0) {
1692 brw_inst_set_gen4_jump_count(brw, inst, br*((while_inst - inst) + 1));
1693 } else if (brw_inst_opcode(brw, inst) == BRW_OPCODE_CONTINUE &&
1694 brw_inst_gen4_jump_count(brw, inst) == 0) {
1695 brw_inst_set_gen4_jump_count(brw, inst, br * (while_inst - inst));
1696 }
1697 }
1698 }
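/* A worked example, assuming gen5 (where brw_jump_scale() returns 2): a
 * BREAK sitting two instructions before the WHILE gets a jump count of
 * 2 * (2 + 1) = 6, landing one instruction past the WHILE, while a
 * CONTINUE in the same spot gets 2 * 2 = 4, landing on the WHILE itself.
 */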
1699
1700 brw_inst *
1701 brw_WHILE(struct brw_compile *p)
1702 {
1703 struct brw_context *brw = p->brw;
1704 brw_inst *insn, *do_insn;
1705 unsigned br = brw_jump_scale(brw);
1706
1707 if (brw->gen >= 7) {
1708 insn = next_insn(p, BRW_OPCODE_WHILE);
1709 do_insn = get_inner_do_insn(p);
1710
1711 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1712 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1713 brw_set_src1(p, insn, brw_imm_ud(0));
1714 brw_inst_set_jip(brw, insn, br * (do_insn - insn));
1715
1716 brw_inst_set_exec_size(brw, insn, p->compressed ? BRW_EXECUTE_16
1717 : BRW_EXECUTE_8);
1718 } else if (brw->gen == 6) {
1719 insn = next_insn(p, BRW_OPCODE_WHILE);
1720 do_insn = get_inner_do_insn(p);
1721
1722 brw_set_dest(p, insn, brw_imm_w(0));
1723 brw_inst_set_gen6_jump_count(brw, insn, br * (do_insn - insn));
1724 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1725 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1726
1727 brw_inst_set_exec_size(brw, insn, p->compressed ? BRW_EXECUTE_16
1728 : BRW_EXECUTE_8);
1729 } else {
1730 if (p->single_program_flow) {
1731 insn = next_insn(p, BRW_OPCODE_ADD);
1732 do_insn = get_inner_do_insn(p);
1733
1734 brw_set_dest(p, insn, brw_ip_reg());
1735 brw_set_src0(p, insn, brw_ip_reg());
1736 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1737 brw_inst_set_exec_size(brw, insn, BRW_EXECUTE_1);
1738 } else {
1739 insn = next_insn(p, BRW_OPCODE_WHILE);
1740 do_insn = get_inner_do_insn(p);
1741
1742 assert(brw_inst_opcode(brw, do_insn) == BRW_OPCODE_DO);
1743
1744 brw_set_dest(p, insn, brw_ip_reg());
1745 brw_set_src0(p, insn, brw_ip_reg());
1746 brw_set_src1(p, insn, brw_imm_d(0));
1747
1748 brw_inst_set_exec_size(brw, insn, brw_inst_exec_size(brw, do_insn));
1749 brw_inst_set_gen4_jump_count(brw, insn, br * (do_insn - insn + 1));
1750 brw_inst_set_gen4_pop_count(brw, insn, 0);
1751
1752 brw_patch_break_cont(p, insn);
1753 }
1754 }
1755 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
1756
1757 p->loop_stack_depth--;
1758
1759 return insn;
1760 }
1761
1762 /* FORWARD JUMPS:
1763 */
1764 void brw_land_fwd_jump(struct brw_compile *p, int jmp_insn_idx)
1765 {
1766 struct brw_context *brw = p->brw;
1767 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1768 unsigned jmpi = 1;
1769
1770 if (brw->gen >= 5)
1771 jmpi = 2;
1772
1773 assert(brw_inst_opcode(brw, jmp_insn) == BRW_OPCODE_JMPI);
1774 assert(brw_inst_src1_reg_file(brw, jmp_insn) == BRW_IMMEDIATE_VALUE);
1775
1776 brw_inst_set_gen4_jump_count(brw, jmp_insn,
1777 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1778 }
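/* The jmpi factor above encodes the jump units: gen4 counts whole 128-bit
 * instructions, while gen5+ counts 64-bit chunks, so the same
 * three-instruction hop is written as 3 on gen4 but 6 on gen5.
 */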
1779
1780 /* To integrate with the above, the comparison instruction should
1781 * populate the flag register. It might be simpler just to use the
1782 * flag register for most WM tasks.
1783 */
1784 void brw_CMP(struct brw_compile *p,
1785 struct brw_reg dest,
1786 unsigned conditional,
1787 struct brw_reg src0,
1788 struct brw_reg src1)
1789 {
1790 struct brw_context *brw = p->brw;
1791 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1792
1793 if (brw->gen >= 8) {
1794 /* The CMP instruction appears to behave erratically for floating point
1795 * sources unless the destination type is also float. Overriding it to
1796 * match src0 makes it work in all cases.
1797 */
1798 dest.type = src0.type;
1799 }
1800
1801 brw_inst_set_cond_modifier(brw, insn, conditional);
1802 brw_set_dest(p, insn, dest);
1803 brw_set_src0(p, insn, src0);
1804 brw_set_src1(p, insn, src1);
1805
1806 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1807 * page says:
1808 * "Any CMP instruction with a null destination must use a {switch}."
1809 *
1810 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1811 * mentioned on their work-arounds pages.
1812 */
1813 if (brw->gen == 7) {
1814 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1815 dest.nr == BRW_ARF_NULL) {
1816 brw_inst_set_thread_control(brw, insn, BRW_THREAD_SWITCH);
1817 }
1818 }
1819 }
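/* A hedged usage sketch: comparing into the null register updates only
 * the flag register, e.g.
 *
 *    brw_CMP(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_D),
 *            BRW_CONDITIONAL_NZ, src0, src1);
 *
 * which is exactly the case the gen7 workaround above guards against.
 */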
1820
1821 /***********************************************************************
1822 * Helpers for the various SEND message types:
1823 */
1824
1825 /** Extended math function, float[8].
1826 */
1827 void gen4_math(struct brw_compile *p,
1828 struct brw_reg dest,
1829 unsigned function,
1830 unsigned msg_reg_nr,
1831 struct brw_reg src,
1832 unsigned data_type,
1833 unsigned precision )
1834 {
1835 struct brw_context *brw = p->brw;
1836 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1837
1838 assert(brw->gen < 6);
1839
1840 /* Example code doesn't set predicate_control for send
1841 * instructions.
1842 */
1843 brw_inst_set_pred_control(brw, insn, 0);
1844 brw_inst_set_base_mrf(brw, insn, msg_reg_nr);
1845
1846 brw_set_dest(p, insn, dest);
1847 brw_set_src0(p, insn, src);
1848 brw_set_math_message(p,
1849 insn,
1850 function,
1851 src.type == BRW_REGISTER_TYPE_D,
1852 precision,
1853 data_type);
1854 }
1855
1856 void gen6_math(struct brw_compile *p,
1857 struct brw_reg dest,
1858 unsigned function,
1859 struct brw_reg src0,
1860 struct brw_reg src1)
1861 {
1862 struct brw_context *brw = p->brw;
1863 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1864
1865 assert(brw->gen >= 6);
1866
1867 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1868 (brw->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1869 assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
1870 (brw->gen >= 8 && src0.file == BRW_IMMEDIATE_VALUE));
1871
1872 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1873 if (brw->gen == 6) {
1874 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1875 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1876 }
1877
1878 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1879 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1880 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1881 assert(src0.type != BRW_REGISTER_TYPE_F);
1882 assert(src1.type != BRW_REGISTER_TYPE_F);
1883 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1884 (brw->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1885 } else {
1886 assert(src0.type == BRW_REGISTER_TYPE_F);
1887 assert(src1.type == BRW_REGISTER_TYPE_F);
1888 if (function == BRW_MATH_FUNCTION_POW) {
1889 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1890 (brw->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1891 } else {
1892 assert(src1.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1893 src1.nr == BRW_ARF_NULL);
1894 }
1895 }
1896
1897 /* Source modifiers are ignored for extended math instructions on Gen6. */
1898 if (brw->gen == 6) {
1899 assert(!src0.negate);
1900 assert(!src0.abs);
1901 assert(!src1.negate);
1902 assert(!src1.abs);
1903 }
1904
1905 brw_inst_set_math_function(brw, insn, function);
1906
1907 brw_set_dest(p, insn, dest);
1908 brw_set_src0(p, insn, src0);
1909 brw_set_src1(p, insn, src1);
1910 }
1911
1912
1913 /**
1914 * Write a block of OWORDs (half a GRF each) to the scratch buffer,
1915 * using a constant offset per channel.
1916 *
1917 * The offset must be aligned to oword size (16 bytes). Used for
1918 * register spilling.
1919 */
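/* A hedged usage sketch (the MRF number and offset are illustrative):
 *
 *    brw_oword_block_write_scratch(p, brw_message_reg(1), 2, 64);
 *
 * would spill two registers (four OWords) to scratch at byte offset 64,
 * which satisfies the 16-byte alignment requirement.
 */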
1920 void brw_oword_block_write_scratch(struct brw_compile *p,
1921 struct brw_reg mrf,
1922 int num_regs,
1923 unsigned offset)
1924 {
1925 struct brw_context *brw = p->brw;
1926 uint32_t msg_control, msg_type;
1927 int mlen;
1928
1929 if (brw->gen >= 6)
1930 offset /= 16;
1931
1932 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1933
1934 if (num_regs == 1) {
1935 msg_control = BRW_DATAPORT_OWORD_BLOCK_2_OWORDS;
1936 mlen = 2;
1937 } else {
1938 msg_control = BRW_DATAPORT_OWORD_BLOCK_4_OWORDS;
1939 mlen = 3;
1940 }
1941
1942 /* Set up the message header. This is g0, with g0.2 filled with
1943 * the offset. We don't want to leave our offset around in g0 or
1944 * it'll screw up texture samples, so set it up inside the message
1945 * reg.
1946 */
1947 {
1948 brw_push_insn_state(p);
1949 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1950 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
1951
1952 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1953
1954 /* set message header global offset field (reg 0, element 2) */
1955 brw_MOV(p,
1956 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
1957 mrf.nr,
1958 2), BRW_REGISTER_TYPE_UD),
1959 brw_imm_ud(offset));
1960
1961 brw_pop_insn_state(p);
1962 }
1963
1964 {
1965 struct brw_reg dest;
1966 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1967 int send_commit_msg;
1968 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
1969 BRW_REGISTER_TYPE_UW);
1970
1971 if (brw_inst_qtr_control(brw, insn) != BRW_COMPRESSION_NONE) {
1972 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
1973 src_header = vec16(src_header);
1974 }
1975 assert(brw_inst_pred_control(brw, insn) == BRW_PREDICATE_NONE);
1976 if (brw->gen < 6)
1977 brw_inst_set_base_mrf(brw, insn, mrf.nr);
1978
1979 /* Until gen6, writes followed by reads from the same location
1980 * are not guaranteed to be ordered unless write_commit is set.
1981 * If set, then a no-op write is issued to the destination
1982 * register to set a dependency, and a read from the destination
1983 * can be used to ensure the ordering.
1984 *
1985 * For gen6, only writes between different threads need ordering
1986 * protection. Our use of DP writes is all about register
1987 * spilling within a thread.
1988 */
1989 if (brw->gen >= 6) {
1990 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
1991 send_commit_msg = 0;
1992 } else {
1993 dest = src_header;
1994 send_commit_msg = 1;
1995 }
1996
1997 brw_set_dest(p, insn, dest);
1998 if (brw->gen >= 6) {
1999 brw_set_src0(p, insn, mrf);
2000 } else {
2001 brw_set_src0(p, insn, brw_null_reg());
2002 }
2003
2004 if (brw->gen >= 6)
2005 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2006 else
2007 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2008
2009 brw_set_dp_write_message(p,
2010 insn,
2011 255, /* binding table index (255=stateless) */
2012 msg_control,
2013 msg_type,
2014 mlen,
2015 true, /* header_present */
2016 0, /* not a render target */
2017 send_commit_msg, /* response_length */
2018 0, /* eot */
2019 send_commit_msg);
2020 }
2021 }
2022
2023
2024 /**
2025 * Read a block of OWords (half a GRF each) from the scratch buffer
2026 * using a constant offset per channel.
2027 *
2028 * Offset must be aligned to oword size (16 bytes). Used for register
2029 * spilling.
2030 */
2031 void
2032 brw_oword_block_read_scratch(struct brw_compile *p,
2033 struct brw_reg dest,
2034 struct brw_reg mrf,
2035 int num_regs,
2036 unsigned offset)
2037 {
2038 struct brw_context *brw = p->brw;
2039 uint32_t msg_control;
2040 int rlen;
2041
2042 if (brw->gen >= 6)
2043 offset /= 16;
2044
2045 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2046 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2047
2048 if (num_regs == 1) {
2049 msg_control = BRW_DATAPORT_OWORD_BLOCK_2_OWORDS;
2050 rlen = 1;
2051 } else {
2052 msg_control = BRW_DATAPORT_OWORD_BLOCK_4_OWORDS;
2053 rlen = 2;
2054 }
2055
2056 {
2057 brw_push_insn_state(p);
2058 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2059 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2060
2061 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2062
2063 /* set message header global offset field (reg 0, element 2) */
2064 brw_MOV(p,
2065 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2066 mrf.nr,
2067 2), BRW_REGISTER_TYPE_UD),
2068 brw_imm_ud(offset));
2069
2070 brw_pop_insn_state(p);
2071 }
2072
2073 {
2074 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2075
2076 assert(brw_inst_pred_control(brw, insn) == BRW_PREDICATE_NONE);
2077 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
2078
2079 brw_set_dest(p, insn, dest); /* dest was retyped to UW above */
2080 if (brw->gen >= 6) {
2081 brw_set_src0(p, insn, mrf);
2082 } else {
2083 brw_set_src0(p, insn, brw_null_reg());
2084 brw_inst_set_base_mrf(brw, insn, mrf.nr);
2085 }
2086
2087 brw_set_dp_read_message(p,
2088 insn,
2089 255, /* binding table index (255=stateless) */
2090 msg_control,
2091 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
2092 BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
2093 1, /* msg_length */
2094 true, /* header_present */
2095 rlen);
2096 }
2097 }
2098
2099 void
2100 gen7_block_read_scratch(struct brw_compile *p,
2101 struct brw_reg dest,
2102 int num_regs,
2103 unsigned offset)
2104 {
2105 const struct brw_context *brw = p->brw;
2106 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2107 assert(brw_inst_pred_control(brw, insn) == BRW_PREDICATE_NONE);
2108
2109 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
2110 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2111
2112 /* The HW requires that the header is present; this is to get the g0.5
2113 * scratch offset.
2114 */
2115 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2116
2117 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2118 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2119 * is 32 bytes, which happens to be the size of a register.
2120 */
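/* For example, a byte offset of 96 (three registers) becomes an HWord
 * offset of 3 below.
 */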
2121 offset /= REG_SIZE;
2122 assert(offset < (1 << 12));
2123
2124 gen7_set_dp_scratch_message(p, insn,
2125 false, /* scratch read */
2126 false, /* OWords */
2127 false, /* invalidate after read */
2128 num_regs,
2129 offset,
2130 1, /* mlen: just g0 */
2131 num_regs, /* rlen */
2132 true); /* header present */
2133 }
2134
2135 /**
2136 * Read a float[4] vector from the data port Data Cache (const buffer).
2137 * Location (in buffer) should be a multiple of 16.
2138 * Used for fetching shader constants.
2139 */
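/* A hedged usage sketch (the MRF number, offset, and binding table slot
 * are illustrative):
 *
 *    brw_oword_block_read(p, dest, brw_message_reg(1), 16, 0);
 *
 * would fetch the float[4] at byte offset 16 of the buffer bound at
 * binding table slot 0.
 */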
2140 void brw_oword_block_read(struct brw_compile *p,
2141 struct brw_reg dest,
2142 struct brw_reg mrf,
2143 uint32_t offset,
2144 uint32_t bind_table_index)
2145 {
2146 struct brw_context *brw = p->brw;
2147
2148 /* On newer hardware, offset is in units of owords. */
2149 if (brw->gen >= 6)
2150 offset /= 16;
2151
2152 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2153
2154 brw_push_insn_state(p);
2155 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2156 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2157 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2158
2159 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2160
2161 /* set message header global offset field (reg 0, element 2) */
2162 brw_MOV(p,
2163 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2164 mrf.nr,
2165 2), BRW_REGISTER_TYPE_UD),
2166 brw_imm_ud(offset));
2167
2168 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2169
2170 /* cast dest to a uword[8] vector */
2171 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2172
2173 brw_set_dest(p, insn, dest);
2174 if (brw->gen >= 6) {
2175 brw_set_src0(p, insn, mrf);
2176 } else {
2177 brw_set_src0(p, insn, brw_null_reg());
2178 brw_inst_set_base_mrf(brw, insn, mrf.nr);
2179 }
2180
2181 brw_set_dp_read_message(p,
2182 insn,
2183 bind_table_index,
2184 BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW,
2185 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2186 BRW_DATAPORT_READ_TARGET_DATA_CACHE,
2187 1, /* msg_length */
2188 true, /* header_present */
2189 1); /* response_length (1 reg, 2 owords!) */
2190
2191 brw_pop_insn_state(p);
2192 }
2193
2194
2195 void brw_fb_WRITE(struct brw_compile *p,
2196 int dispatch_width,
2197 unsigned msg_reg_nr,
2198 struct brw_reg src0,
2199 unsigned msg_control,
2200 unsigned binding_table_index,
2201 unsigned msg_length,
2202 unsigned response_length,
2203 bool eot,
2204 bool header_present)
2205 {
2206 struct brw_context *brw = p->brw;
2207 brw_inst *insn;
2208 unsigned msg_type;
2209 struct brw_reg dest;
2210
2211 if (dispatch_width == 16)
2212 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2213 else
2214 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2215
2216 if (brw->gen >= 6) {
2217 insn = next_insn(p, BRW_OPCODE_SENDC);
2218 } else {
2219 insn = next_insn(p, BRW_OPCODE_SEND);
2220 }
2221 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
2222
2223 if (brw->gen >= 6) {
2224 /* headerless version, just submit color payload */
2225 src0 = brw_message_reg(msg_reg_nr);
2226
2227 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2228 } else {
2229 brw_inst_set_base_mrf(brw, insn, msg_reg_nr);
2230
2231 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2232 }
2233
2234 brw_set_dest(p, insn, dest);
2235 brw_set_src0(p, insn, src0);
2236 brw_set_dp_write_message(p,
2237 insn,
2238 binding_table_index,
2239 msg_control,
2240 msg_type,
2241 msg_length,
2242 header_present,
2243 eot, /* last render target write */
2244 response_length,
2245 eot,
2246 0 /* send_commit_msg */);
2247 }
2248
2249
2250 /**
2251 * Texture sample instruction.
2252 * Note: the msg_type plus msg_length values determine exactly what kind
2253 * of sampling operation is performed. See volume 4, page 161 of docs.
2254 */
2255 void brw_SAMPLE(struct brw_compile *p,
2256 struct brw_reg dest,
2257 unsigned msg_reg_nr,
2258 struct brw_reg src0,
2259 unsigned binding_table_index,
2260 unsigned sampler,
2261 unsigned msg_type,
2262 unsigned response_length,
2263 unsigned msg_length,
2264 unsigned header_present,
2265 unsigned simd_mode,
2266 unsigned return_format)
2267 {
2268 struct brw_context *brw = p->brw;
2269 brw_inst *insn;
2270
2271 if (msg_reg_nr != -1)
2272 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2273
2274 insn = next_insn(p, BRW_OPCODE_SEND);
2275 brw_inst_set_pred_control(brw, insn, BRW_PREDICATE_NONE); /* XXX */
2276
2277 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2278 *
2279 * "Instruction compression is not allowed for this instruction (that
2280 * is, send). The hardware behavior is undefined if this instruction is
2281 * set as compressed. However, compress control can be set to "SecHalf"
2282 * to affect the EMask generation."
2283 *
2284 * No similar wording is found in later PRMs, but there are examples
2285 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2286 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2287 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2288 */
2289 if (brw_inst_qtr_control(brw, insn) != BRW_COMPRESSION_2NDHALF)
2290 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
2291
2292 if (brw->gen < 6)
2293 brw_inst_set_base_mrf(brw, insn, msg_reg_nr);
2294
2295 brw_set_dest(p, insn, dest);
2296 brw_set_src0(p, insn, src0);
2297 brw_set_sampler_message(p, insn,
2298 binding_table_index,
2299 sampler,
2300 msg_type,
2301 response_length,
2302 msg_length,
2303 header_present,
2304 simd_mode,
2305 return_format);
2306 }
2307
2308 /* All these parameters are pretty confusing; we might be better off
2309 * using bitmasks and macros for this, in the old style, or perhaps
2310 * just having the caller fill in the fields of dword3 itself.
2311 */
2312 void brw_urb_WRITE(struct brw_compile *p,
2313 struct brw_reg dest,
2314 unsigned msg_reg_nr,
2315 struct brw_reg src0,
2316 enum brw_urb_write_flags flags,
2317 unsigned msg_length,
2318 unsigned response_length,
2319 unsigned offset,
2320 unsigned swizzle)
2321 {
2322 struct brw_context *brw = p->brw;
2323 brw_inst *insn;
2324
2325 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2326
2327 if (brw->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2328 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2329 brw_push_insn_state(p);
2330 brw_set_default_access_mode(p, BRW_ALIGN_1);
2331 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2332 brw_MOV(p, get_element_ud(brw_message_reg(msg_reg_nr), 5),
2333 brw_imm_ud(0xff00));
2334 brw_pop_insn_state(p);
2335 }
2336
2337 insn = next_insn(p, BRW_OPCODE_SEND);
2338
2339 assert(msg_length < BRW_MAX_MRF);
2340
2341 brw_set_dest(p, insn, dest);
2342 brw_set_src0(p, insn, src0);
2343 brw_set_src1(p, insn, brw_imm_d(0));
2344
2345 if (brw->gen < 6)
2346 brw_inst_set_base_mrf(brw, insn, msg_reg_nr);
2347
2348 brw_set_urb_message(p,
2349 insn,
2350 flags,
2351 msg_length,
2352 response_length,
2353 offset,
2354 swizzle);
2355 }
2356
2357 static int
2358 brw_find_next_block_end(struct brw_compile *p, int start_offset)
2359 {
2360 int offset;
2361 void *store = p->store;
2362 const struct brw_context *brw = p->brw;
2363
2364 for (offset = next_offset(brw, store, start_offset);
2365 offset < p->next_insn_offset;
2366 offset = next_offset(brw, store, offset)) {
2367 brw_inst *insn = store + offset;
2368
2369 switch (brw_inst_opcode(brw, insn)) {
2370 case BRW_OPCODE_ENDIF:
2371 case BRW_OPCODE_ELSE:
2372 case BRW_OPCODE_WHILE:
2373 case BRW_OPCODE_HALT:
2374 return offset;
2375 }
2376 }
2377
2378 return 0;
2379 }
2380
2381 /* There is no DO instruction on gen6+, so to find the end of the loop
2382 * we have to look for a WHILE that jumps back past our start
2383 * instruction.
2384 */
2385 static int
2386 brw_find_loop_end(struct brw_compile *p, int start_offset)
2387 {
2388 struct brw_context *brw = p->brw;
2389 int offset;
2390 int scale = 16 / brw_jump_scale(brw);
2391 void *store = p->store;
2392
2393 assert(brw->gen >= 6);
2394
2395 /* Always start after the instruction (such as a WHILE) we're trying to fix
2396 * up.
2397 */
2398 for (offset = next_offset(brw, store, start_offset);
2399 offset < p->next_insn_offset;
2400 offset = next_offset(brw, store, offset)) {
2401 brw_inst *insn = store + offset;
2402
2403 if (brw_inst_opcode(brw, insn) == BRW_OPCODE_WHILE) {
2404 int jip = brw->gen == 6 ? brw_inst_gen6_jump_count(brw, insn)
2405 : brw_inst_jip(brw, insn);
2406 if (offset + jip * scale <= start_offset)
2407 return offset;
2408 }
2409 }
2410 assert(!"not reached");
2411 return start_offset;
2412 }
2413
2414 /* After program generation, go back and update the UIP and JIP of
2415 * BREAK, CONT, ENDIF, and HALT instructions to their correct locations.
2416 */
2417 void
2418 brw_set_uip_jip(struct brw_compile *p)
2419 {
2420 struct brw_context *brw = p->brw;
2421 int offset;
2422 int br = brw_jump_scale(brw);
2423 int scale = 16 / br;
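/* E.g., assuming brw_jump_scale() returns 2 on gen6/gen7, scale == 8 and
 * a 64-byte distance becomes a JIP/UIP of 8 (measured in 64-bit chunks);
 * on gen8, br == 16 and scale == 1, so offsets stay in bytes.
 */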
2424 void *store = p->store;
2425
2426 if (brw->gen < 6)
2427 return;
2428
2429 for (offset = 0; offset < p->next_insn_offset;
2430 offset = next_offset(brw, store, offset)) {
2431 brw_inst *insn = store + offset;
2432
2433 if (brw_inst_cmpt_control(brw, insn)) {
2434 /* Fixups for compacted BREAK/CONTINUE not supported yet. */
2435 assert(brw_inst_opcode(brw, insn) != BRW_OPCODE_BREAK &&
2436 brw_inst_opcode(brw, insn) != BRW_OPCODE_CONTINUE &&
2437 brw_inst_opcode(brw, insn) != BRW_OPCODE_HALT);
2438 continue;
2439 }
2440
2441 int block_end_offset = brw_find_next_block_end(p, offset);
2442 switch (brw_inst_opcode(brw, insn)) {
2443 case BRW_OPCODE_BREAK:
2444 assert(block_end_offset != 0);
2445 brw_inst_set_jip(brw, insn, (block_end_offset - offset) / scale);
2446 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2447 brw_inst_set_uip(brw, insn,
2448 (brw_find_loop_end(p, offset) - offset +
2449 (brw->gen == 6 ? 16 : 0)) / scale);
2450 break;
2451 case BRW_OPCODE_CONTINUE:
2452 assert(block_end_offset != 0);
2453 brw_inst_set_jip(brw, insn, (block_end_offset - offset) / scale);
2454 brw_inst_set_uip(brw, insn,
2455 (brw_find_loop_end(p, offset) - offset) / scale);
2456
2457 assert(brw_inst_uip(brw, insn) != 0);
2458 assert(brw_inst_jip(brw, insn) != 0);
2459 break;
2460
2461 case BRW_OPCODE_ENDIF:
2462 if (block_end_offset == 0)
2463 brw_inst_set_jip(brw, insn, 1 * br);
2464 else
2465 brw_inst_set_jip(brw, insn, (block_end_offset - offset) / scale);
2466 break;
2467
2468 case BRW_OPCODE_HALT:
2469 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2470 *
2471 * "In case of the halt instruction not inside any conditional
2472 * code block, the value of <JIP> and <UIP> should be the
2473 * same. In case of the halt instruction inside conditional code
2474 * block, the <UIP> should be the end of the program, and the
2475 * <JIP> should be end of the most inner conditional code block."
2476 *
2477 * The uip will have already been set by whoever set up the
2478 * instruction.
2479 */
2480 if (block_end_offset == 0) {
2481 brw_inst_set_jip(brw, insn, brw_inst_uip(brw, insn));
2482 } else {
2483 brw_inst_set_jip(brw, insn, (block_end_offset - offset) / scale);
2484 }
2485 assert(brw_inst_uip(brw, insn) != 0);
2486 assert(brw_inst_jip(brw, insn) != 0);
2487 break;
2488 }
2489 }
2490 }
2491
2492 void brw_ff_sync(struct brw_compile *p,
2493 struct brw_reg dest,
2494 unsigned msg_reg_nr,
2495 struct brw_reg src0,
2496 bool allocate,
2497 unsigned response_length,
2498 bool eot)
2499 {
2500 struct brw_context *brw = p->brw;
2501 brw_inst *insn;
2502
2503 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2504
2505 insn = next_insn(p, BRW_OPCODE_SEND);
2506 brw_set_dest(p, insn, dest);
2507 brw_set_src0(p, insn, src0);
2508 brw_set_src1(p, insn, brw_imm_d(0));
2509
2510 if (brw->gen < 6)
2511 brw_inst_set_base_mrf(brw, insn, msg_reg_nr);
2512
2513 brw_set_ff_sync_message(p,
2514 insn,
2515 allocate,
2516 response_length,
2517 eot);
2518 }
2519
2520 /**
2521 * Emit the SEND instruction necessary to generate stream output data on Gen6
2522 * (for transform feedback).
2523 *
2524 * If send_commit_msg is true, this is the last piece of stream output data
2525 * from this thread, so send the data as a committed write. According to the
2526 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2527 *
2528 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2529 * writes are complete by sending the final write as a committed write."
2530 */
2531 void
2532 brw_svb_write(struct brw_compile *p,
2533 struct brw_reg dest,
2534 unsigned msg_reg_nr,
2535 struct brw_reg src0,
2536 unsigned binding_table_index,
2537 bool send_commit_msg)
2538 {
2539 brw_inst *insn;
2540
2541 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2542
2543 insn = next_insn(p, BRW_OPCODE_SEND);
2544 brw_set_dest(p, insn, dest);
2545 brw_set_src0(p, insn, src0);
2546 brw_set_src1(p, insn, brw_imm_d(0));
2547 brw_set_dp_write_message(p, insn,
2548 binding_table_index,
2549 0, /* msg_control: ignored */
2550 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2551 1, /* msg_length */
2552 true, /* header_present */
2553 0, /* last_render_target: ignored */
2554 send_commit_msg, /* response_length */
2555 0, /* end_of_thread */
2556 send_commit_msg); /* send_commit_msg */
2557 }
2558
2559 static void
2560 brw_set_dp_untyped_atomic_message(struct brw_compile *p,
2561 brw_inst *insn,
2562 unsigned atomic_op,
2563 unsigned bind_table_index,
2564 unsigned msg_length,
2565 unsigned response_length,
2566 bool header_present)
2567 {
2568 const struct brw_context *brw = p->brw;
2569
2570 unsigned msg_control =
2571 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2572 (response_length ? 1 << 5 : 0); /* Return data expected */
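/* For instance, BRW_AOP_ADD with a response expected yields
 * msg_control = BRW_AOP_ADD | (1 << 5); bit 4 is ORed in below when the
 * message executes in SIMD8 mode.
 */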
2573
2574 if (brw->gen >= 8 || brw->is_haswell) {
2575 brw_set_message_descriptor(p, insn, HSW_SFID_DATAPORT_DATA_CACHE_1,
2576 msg_length, response_length,
2577 header_present, false);
2578
2579
2580 if (brw_inst_access_mode(brw, insn) == BRW_ALIGN_1) {
2581 if (brw_inst_exec_size(brw, insn) != BRW_EXECUTE_16)
2582 msg_control |= 1 << 4; /* SIMD8 mode */
2583
2584 brw_inst_set_dp_msg_type(brw, insn,
2585 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2586 } else {
2587 brw_inst_set_dp_msg_type(brw, insn,
2588 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2589 }
2590 } else {
2591 brw_set_message_descriptor(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
2592 msg_length, response_length,
2593 header_present, false);
2594
2595 brw_inst_set_dp_msg_type(brw, insn, GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2596
2597 if (brw_inst_exec_size(brw, insn) != BRW_EXECUTE_16)
2598 msg_control |= 1 << 4; /* SIMD8 mode */
2599 }
2600
2601 brw_inst_set_binding_table_index(brw, insn, bind_table_index);
2602 brw_inst_set_dp_msg_control(brw, insn, msg_control);
2603 }
2604
2605 void
2606 brw_untyped_atomic(struct brw_compile *p,
2607 struct brw_reg dest,
2608 struct brw_reg mrf,
2609 unsigned atomic_op,
2610 unsigned bind_table_index,
2611 unsigned msg_length,
2612 unsigned response_length) {
2613 const struct brw_context *brw = p->brw;
2614 brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);
2615
2616 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UD));
2617 brw_set_src0(p, insn, retype(mrf, BRW_REGISTER_TYPE_UD));
2618 brw_set_src1(p, insn, brw_imm_d(0));
2619 brw_set_dp_untyped_atomic_message(
2620 p, insn, atomic_op, bind_table_index, msg_length, response_length,
2621 brw_inst_access_mode(brw, insn) == BRW_ALIGN_1);
2622 }
2623
2624 static void
2625 brw_set_dp_untyped_surface_read_message(struct brw_compile *p,
2626 brw_inst *insn,
2627 unsigned bind_table_index,
2628 unsigned msg_length,
2629 unsigned response_length,
2630 bool header_present)
2631 {
2632 const struct brw_context *brw = p->brw;
2633 const unsigned dispatch_width =
2634 (brw_inst_exec_size(brw, insn) == BRW_EXECUTE_16 ? 16 : 8);
2635 const unsigned num_channels = response_length / (dispatch_width / 8);
2636
2637 if (brw->gen >= 8 || brw->is_haswell) {
2638 brw_set_message_descriptor(p, insn, HSW_SFID_DATAPORT_DATA_CACHE_1,
2639 msg_length, response_length,
2640 header_present, false);
2641
2642 brw_inst_set_dp_msg_type(brw, insn,
2643 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ);
2644 } else {
2645 brw_set_message_descriptor(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
2646 msg_length, response_length,
2647 header_present, false);
2648
2649 brw_inst_set_dp_msg_type(brw, insn,
2650 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ);
2651 }
2652
2653 /* Set mask of 32-bit channels to drop. */
2654 unsigned msg_control = (0xf & (0xf << num_channels));
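/* E.g. num_channels == 2 gives 0xf & (0xf << 2) == 0xc, dropping the
 * two upper channels and returning only X and Y.
 */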
2655
2656 if (brw_inst_access_mode(brw, insn) == BRW_ALIGN_1) {
2657 if (dispatch_width == 16)
2658 msg_control |= 1 << 4; /* SIMD16 mode */
2659 else
2660 msg_control |= 2 << 4; /* SIMD8 mode */
2661 }
2662
2663 brw_inst_set_binding_table_index(brw, insn, bind_table_index);
2664 brw_inst_set_dp_msg_control(brw, insn, msg_control);
2665 }
2666
2667 void
2668 brw_untyped_surface_read(struct brw_compile *p,
2669 struct brw_reg dest,
2670 struct brw_reg mrf,
2671 unsigned bind_table_index,
2672 unsigned msg_length,
2673 unsigned response_length)
2674 {
2675 const struct brw_context *brw = p->brw;
2676 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2677
2678 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UD));
2679 brw_set_src0(p, insn, retype(mrf, BRW_REGISTER_TYPE_UD));
2680 brw_set_dp_untyped_surface_read_message(
2681 p, insn, bind_table_index, msg_length, response_length,
2682 brw_inst_access_mode(brw, insn) == BRW_ALIGN_1);
2683 }
2684
2685 void
2686 brw_pixel_interpolator_query(struct brw_compile *p,
2687 struct brw_reg dest,
2688 struct brw_reg mrf,
2689 bool noperspective,
2690 unsigned mode,
2691 unsigned data,
2692 unsigned msg_length,
2693 unsigned response_length)
2694 {
2695 const struct brw_context *brw = p->brw;
2696 struct brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2697
2698 brw_set_dest(p, insn, dest);
2699 brw_set_src0(p, insn, mrf);
2700 brw_set_message_descriptor(p, insn, GEN7_SFID_PIXEL_INTERPOLATOR,
2701 msg_length, response_length,
2702 false /* header is never present for PI */,
2703 false);
2704
2705 brw_inst_set_pi_simd_mode(
2706 brw, insn, brw_inst_exec_size(brw, insn) == BRW_EXECUTE_16);
2707 brw_inst_set_pi_slot_group(brw, insn, 0); /* zero unless 32/64px dispatch */
2708 brw_inst_set_pi_nopersp(brw, insn, noperspective);
2709 brw_inst_set_pi_message_type(brw, insn, mode);
2710 brw_inst_set_pi_message_data(brw, insn, data);
2711 }
2712
2713 /**
2714 * This instruction is generated as a single-channel align1 instruction by
2715 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
2716 *
2717 * We can't use the typed atomic op in the FS because that has the execution
2718 * mask ANDed with the pixel mask, but we just want to write the one dword for
2719 * all the pixels.
2720 *
2721 * We don't use the SIMD4x2 atomic ops in the VS because we want to just write
2722 * one u32. So we use the same untyped atomic write message as the pixel
2723 * shader.
2724 *
2725 * The untyped atomic operation requires a BUFFER surface type with RAW
2726 * format, and is only accessible through the legacy DATA_CACHE dataport
2727 * messages.
2728 */
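/* A hedged usage sketch (the payload register and surface index are
 * illustrative):
 *
 *    brw_shader_time_add(p, brw_vec1_grf(10, 0), 1);
 *
 * emits an untyped atomic ADD against the RAW buffer at surface index 1,
 * using the two-register payload starting at g10.
 */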
2729 void brw_shader_time_add(struct brw_compile *p,
2730 struct brw_reg payload,
2731 uint32_t surf_index)
2732 {
2733 assert(p->brw->gen >= 7);
2734
2735 brw_push_insn_state(p);
2736 brw_set_default_access_mode(p, BRW_ALIGN_1);
2737 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2738 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
2739 brw_pop_insn_state(p);
2740
2741 /* We use brw_vec1_reg and unmasked because we want to increment the given
2742 * offset only once.
2743 */
2744 brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
2745 BRW_ARF_NULL, 0));
2746 brw_set_src0(p, send, brw_vec1_reg(payload.file,
2747 payload.nr, 0));
2748 brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, surf_index,
2749 2 /* message length */,
2750 0 /* response length */,
2751 false /* header present */);
2752 }