i965: Fix sampler state pointer adjustment for nonconst samplers
src/mesa/drivers/dri/i965/brw_eu_emit.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_context.h"
#include "brw_defines.h"
#include "brw_eu.h"

#include "util/ralloc.h"

/***********************************************************************
 * Internal helper for constructing instructions
 */

static void guess_execution_size(struct brw_compile *p,
                                 brw_inst *insn,
                                 struct brw_reg reg)
{
   const struct brw_context *brw = p->brw;

   if (reg.width == BRW_WIDTH_8 && p->compressed) {
      brw_inst_set_exec_size(brw, insn, BRW_EXECUTE_16);
   } else {
      /* Register width definitions are compatible with BRW_EXECUTE_* enums. */
      brw_inst_set_exec_size(brw, insn, reg.width);
   }
}
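
/* Illustrative mapping (a sketch; it relies on the BRW_WIDTH_* encodings
 * matching BRW_EXECUTE_*, as the comment above notes):
 *
 *    reg.width == BRW_WIDTH_8, p->compressed    ->  BRW_EXECUTE_16
 *    reg.width == BRW_WIDTH_8, !p->compressed   ->  BRW_EXECUTE_8
 *    reg.width == BRW_WIDTH_4                   ->  BRW_EXECUTE_4
 */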


/**
 * Prior to Sandybridge, the SEND instruction accepted non-MRF source
 * registers, implicitly moving the operand to a message register.
 *
 * On Sandybridge, this is no longer the case.  This function performs the
 * explicit move; it should be called before emitting a SEND instruction.
 */
void
gen6_resolve_implied_move(struct brw_compile *p,
                          struct brw_reg *src,
                          unsigned msg_reg_nr)
{
   struct brw_context *brw = p->brw;
   if (brw->gen < 6)
      return;

   if (src->file == BRW_MESSAGE_REGISTER_FILE)
      return;

   if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, retype(brw_message_reg(msg_reg_nr), BRW_REGISTER_TYPE_UD),
              retype(*src, BRW_REGISTER_TYPE_UD));
      brw_pop_insn_state(p);
   }
   *src = brw_message_reg(msg_reg_nr);
}
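
/* Typical call pattern (a sketch; "dest", "src" and "msg_reg_nr" are
 * placeholders, not fixed API): the implied move is resolved before the
 * SEND is emitted, so that on Gen6+ src0 really names m<msg_reg_nr>:
 *
 *    gen6_resolve_implied_move(p, &src, msg_reg_nr);
 *    insn = next_insn(p, BRW_OPCODE_SEND);
 *    brw_set_dest(p, insn, dest);
 *    brw_set_src0(p, insn, src);
 */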

static void
gen7_convert_mrf_to_grf(struct brw_compile *p, struct brw_reg *reg)
{
   /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
    * "The send with EOT should use register space R112-R127 for <src>. This is
    * to enable loading of a new thread into the same slot while the message
    * with EOT for current thread is pending dispatch."
    *
    * Since we're pretending to have 16 MRFs anyway, we may as well use the
    * registers required for messages with EOT.
    */
   struct brw_context *brw = p->brw;
   if (brw->gen >= 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
      reg->file = BRW_GENERAL_REGISTER_FILE;
      reg->nr += GEN7_MRF_HACK_START;
   }
}
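
/* Example (assuming GEN7_MRF_HACK_START is 112, per brw_defines.h): a
 * message register m4 on Gen7+ is rewritten to g116, which lands in the
 * R112-R127 range the PRM quote above reserves for EOT sends.
 */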

/**
 * Convert a brw_reg_type enumeration value into the hardware representation.
 *
 * The hardware encoding may depend on whether the value is an immediate.
 */
unsigned
brw_reg_type_to_hw_type(const struct brw_context *brw,
                        enum brw_reg_type type, unsigned file)
{
   if (file == BRW_IMMEDIATE_VALUE) {
      static const int imm_hw_types[] = {
         [BRW_REGISTER_TYPE_UD] = BRW_HW_REG_TYPE_UD,
         [BRW_REGISTER_TYPE_D]  = BRW_HW_REG_TYPE_D,
         [BRW_REGISTER_TYPE_UW] = BRW_HW_REG_TYPE_UW,
         [BRW_REGISTER_TYPE_W]  = BRW_HW_REG_TYPE_W,
         [BRW_REGISTER_TYPE_F]  = BRW_HW_REG_TYPE_F,
         [BRW_REGISTER_TYPE_UB] = -1,
         [BRW_REGISTER_TYPE_B]  = -1,
         [BRW_REGISTER_TYPE_UV] = BRW_HW_REG_IMM_TYPE_UV,
         [BRW_REGISTER_TYPE_VF] = BRW_HW_REG_IMM_TYPE_VF,
         [BRW_REGISTER_TYPE_V]  = BRW_HW_REG_IMM_TYPE_V,
         [BRW_REGISTER_TYPE_DF] = GEN8_HW_REG_IMM_TYPE_DF,
         [BRW_REGISTER_TYPE_HF] = GEN8_HW_REG_IMM_TYPE_HF,
         [BRW_REGISTER_TYPE_UQ] = GEN8_HW_REG_TYPE_UQ,
         [BRW_REGISTER_TYPE_Q]  = GEN8_HW_REG_TYPE_Q,
      };
      assert(type < ARRAY_SIZE(imm_hw_types));
      assert(imm_hw_types[type] != -1);
      assert(brw->gen >= 8 || type < BRW_REGISTER_TYPE_DF);
      return imm_hw_types[type];
   } else {
      /* Non-immediate registers */
      static const int hw_types[] = {
         [BRW_REGISTER_TYPE_UD] = BRW_HW_REG_TYPE_UD,
         [BRW_REGISTER_TYPE_D]  = BRW_HW_REG_TYPE_D,
         [BRW_REGISTER_TYPE_UW] = BRW_HW_REG_TYPE_UW,
         [BRW_REGISTER_TYPE_W]  = BRW_HW_REG_TYPE_W,
         [BRW_REGISTER_TYPE_UB] = BRW_HW_REG_NON_IMM_TYPE_UB,
         [BRW_REGISTER_TYPE_B]  = BRW_HW_REG_NON_IMM_TYPE_B,
         [BRW_REGISTER_TYPE_F]  = BRW_HW_REG_TYPE_F,
         [BRW_REGISTER_TYPE_UV] = -1,
         [BRW_REGISTER_TYPE_VF] = -1,
         [BRW_REGISTER_TYPE_V]  = -1,
         [BRW_REGISTER_TYPE_DF] = GEN7_HW_REG_NON_IMM_TYPE_DF,
         [BRW_REGISTER_TYPE_HF] = GEN8_HW_REG_NON_IMM_TYPE_HF,
         [BRW_REGISTER_TYPE_UQ] = GEN8_HW_REG_TYPE_UQ,
         [BRW_REGISTER_TYPE_Q]  = GEN8_HW_REG_TYPE_Q,
      };
      assert(type < ARRAY_SIZE(hw_types));
      assert(hw_types[type] != -1);
      assert(brw->gen >= 7 || type < BRW_REGISTER_TYPE_DF);
      assert(brw->gen >= 8 || type < BRW_REGISTER_TYPE_HF);
      return hw_types[type];
   }
}
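
/* Usage sketch: the hardware encoding depends on the file, and some
 * type/file pairs simply don't exist (-1 in the tables above), e.g.
 *
 *    brw_reg_type_to_hw_type(brw, BRW_REGISTER_TYPE_VF,
 *                            BRW_IMMEDIATE_VALUE);        immediate-only
 *    brw_reg_type_to_hw_type(brw, BRW_REGISTER_TYPE_UB,
 *                            BRW_GENERAL_REGISTER_FILE);  register-only
 *
 * An invalid pair trips the asserts instead of encoding garbage.
 */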

void
brw_set_dest(struct brw_compile *p, brw_inst *inst, struct brw_reg dest)
{
   const struct brw_context *brw = p->brw;

   if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE &&
       dest.file != BRW_MESSAGE_REGISTER_FILE)
      assert(dest.nr < 128);

   gen7_convert_mrf_to_grf(p, &dest);

   brw_inst_set_dst_reg_file(brw, inst, dest.file);
   brw_inst_set_dst_reg_type(brw, inst, brw_reg_type_to_hw_type(brw, dest.type,
                                                                dest.file));
   brw_inst_set_dst_address_mode(brw, inst, dest.address_mode);

   if (dest.address_mode == BRW_ADDRESS_DIRECT) {
      brw_inst_set_dst_da_reg_nr(brw, inst, dest.nr);

      if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_da1_subreg_nr(brw, inst, dest.subnr);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(brw, inst, dest.hstride);
      } else {
         brw_inst_set_dst_da16_subreg_nr(brw, inst, dest.subnr / 16);
         brw_inst_set_da16_writemask(brw, inst, dest.dw1.bits.writemask);
         if (dest.file == BRW_GENERAL_REGISTER_FILE ||
             dest.file == BRW_MESSAGE_REGISTER_FILE) {
            assert(dest.dw1.bits.writemask != 0);
         }
         /* From the Ivybridge PRM, Vol 4, Part 3, Section 5.2.4.1:
          * Although Dst.HorzStride is a don't care for Align16, HW needs
          * this to be programmed as "01".
          */
         brw_inst_set_dst_hstride(brw, inst, 1);
      }
   } else {
      brw_inst_set_dst_ia_subreg_nr(brw, inst, dest.subnr);

      /* These are different sizes in align1 vs align16:
       */
      if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
         brw_inst_set_dst_ia1_addr_imm(brw, inst,
                                       dest.dw1.bits.indirect_offset);
         if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
            dest.hstride = BRW_HORIZONTAL_STRIDE_1;
         brw_inst_set_dst_hstride(brw, inst, dest.hstride);
      } else {
         brw_inst_set_dst_ia16_addr_imm(brw, inst,
                                        dest.dw1.bits.indirect_offset);
         /* even ignored in da16, still need to set as '01' */
         brw_inst_set_dst_hstride(brw, inst, 1);
      }
   }

   /* NEW: Set the execution size based on dest.width and
    * inst->compression_control:
    */
   guess_execution_size(p, inst, dest);
}

extern int reg_type_size[];

static void
validate_reg(const struct brw_context *brw, brw_inst *inst, struct brw_reg reg)
{
   int hstride_for_reg[] = {0, 1, 2, 4};
   int vstride_for_reg[] = {0, 1, 2, 4, 8, 16, 32, 64, 128, 256};
   int width_for_reg[] = {1, 2, 4, 8, 16};
   int execsize_for_reg[] = {1, 2, 4, 8, 16};
   int width, hstride, vstride, execsize;

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      /* 3.3.6: Region Parameters.  Restriction: Immediate vectors
       * mean the destination has to be 128-bit aligned and the
       * destination horiz stride has to be a word.
       */
      if (reg.type == BRW_REGISTER_TYPE_V) {
         assert(hstride_for_reg[brw_inst_dst_hstride(brw, inst)] *
                reg_type_size[brw_inst_dst_reg_type(brw, inst)] == 2);
      }

      return;
   }

   if (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
       reg.nr == BRW_ARF_NULL)
      return;

   assert(reg.hstride >= 0 && reg.hstride < Elements(hstride_for_reg));
   hstride = hstride_for_reg[reg.hstride];

   if (reg.vstride == 0xf) {
      vstride = -1;
   } else {
      assert(reg.vstride >= 0 && reg.vstride < Elements(vstride_for_reg));
      vstride = vstride_for_reg[reg.vstride];
   }

   assert(reg.width >= 0 && reg.width < Elements(width_for_reg));
   width = width_for_reg[reg.width];

   assert(brw_inst_exec_size(brw, inst) >= 0 &&
          brw_inst_exec_size(brw, inst) < Elements(execsize_for_reg));
   execsize = execsize_for_reg[brw_inst_exec_size(brw, inst)];

   /* Restrictions from 3.3.10: Register Region Restrictions. */
   /* 3. */
   assert(execsize >= width);

   /* 4. */
   if (execsize == width && hstride != 0) {
      assert(vstride == -1 || vstride == width * hstride);
   }

   /* 5. */
   if (execsize == width && hstride == 0) {
      /* no restriction on vstride. */
   }

   /* 6. */
   if (width == 1) {
      assert(hstride == 0);
   }

   /* 7. */
   if (execsize == 1 && width == 1) {
      assert(hstride == 0);
      assert(vstride == 0);
   }

   /* 8. */
   if (vstride == 0 && hstride == 0) {
      assert(width == 1);
   }

   /* 10. Check destination issues. */
}

static bool
is_compactable_immediate(unsigned imm)
{
   /* We get the low 12 bits as-is. */
   imm &= ~0xfff;

   /* We get one bit replicated through the top 20 bits. */
   return imm == 0 || imm == 0xfffff000;
}
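
/* Worked examples:
 *
 *    is_compactable_immediate(0x00000fff) -> true   (top 20 bits all zero)
 *    is_compactable_immediate(0xfffff800) -> true   (top 20 bits all one)
 *    is_compactable_immediate(0x00001000) -> false  (bit 12 set, rest clear)
 */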

void
brw_set_src0(struct brw_compile *p, brw_inst *inst, struct brw_reg reg)
{
   struct brw_context *brw = p->brw;

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   if (brw->gen >= 6 && (brw_inst_opcode(brw, inst) == BRW_OPCODE_SEND ||
                         brw_inst_opcode(brw, inst) == BRW_OPCODE_SENDC)) {
      /* Any source modifiers or regions will be ignored, since this just
       * identifies the MRF/GRF to start reading the message contents from.
       * Check for some likely failures.
       */
      assert(!reg.negate);
      assert(!reg.abs);
      assert(reg.address_mode == BRW_ADDRESS_DIRECT);
   }

   validate_reg(brw, inst, reg);

   brw_inst_set_src0_reg_file(brw, inst, reg.file);
   brw_inst_set_src0_reg_type(brw, inst,
                              brw_reg_type_to_hw_type(brw, reg.type, reg.file));
   brw_inst_set_src0_abs(brw, inst, reg.abs);
   brw_inst_set_src0_negate(brw, inst, reg.negate);
   brw_inst_set_src0_address_mode(brw, inst, reg.address_mode);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      brw_inst_set_imm_ud(brw, inst, reg.dw1.ud);

      /* The Bspec's section titled "Non-present Operands" claims that if src0
       * is an immediate that src1's type must be the same as that of src0.
       *
       * The SNB+ DataTypeIndex instruction compaction tables contain mappings
       * that do not follow this rule. E.g., from the IVB/HSW table:
       *
       *  DataTypeIndex   18-Bit Mapping       Mapped Meaning
       *        3         001000001011111101   r:f | i:vf | a:ud | <1> | dir |
       *
       * And from the SNB table:
       *
       *  DataTypeIndex   18-Bit Mapping       Mapped Meaning
       *        8         001000000111101100   a:w | i:w | a:ud | <1> | dir |
       *
       * Neither of these cause warnings from the simulator when used,
       * compacted or otherwise. In fact, all compaction mappings that have an
       * immediate in src0 use a:ud for src1.
       *
       * The GM45 instruction compaction tables do not contain mapped meanings
       * so it's not clear whether it has the restriction. We'll assume it was
       * lifted on SNB. (FINISHME: decode the GM45 tables and check.)
       */
      brw_inst_set_src1_reg_file(brw, inst, BRW_ARCHITECTURE_REGISTER_FILE);
      if (brw->gen < 6) {
         brw_inst_set_src1_reg_type(brw, inst,
                                    brw_inst_src0_reg_type(brw, inst));
      } else {
         brw_inst_set_src1_reg_type(brw, inst, BRW_HW_REG_TYPE_UD);
      }

      /* Compacted instructions only have 12-bits (plus 1 for the other 20)
       * for immediate values. Presumably the hardware engineers realized
       * that the only useful floating-point value that could be represented
       * in this format is 0.0, which can also be represented as a VF-typed
       * immediate, so they gave us the previously mentioned mapping on IVB+.
       *
       * Strangely, we do have a mapping for imm:f in src1, so we don't need
       * to do this there.
       *
       * If we see a 0.0:F, change the type to VF so that it can be compacted.
       */
      if (brw_inst_imm_ud(brw, inst) == 0x0 &&
          brw_inst_src0_reg_type(brw, inst) == BRW_HW_REG_TYPE_F) {
         brw_inst_set_src0_reg_type(brw, inst, BRW_HW_REG_IMM_TYPE_VF);
      }

      /* There are no mappings for dst:d | i:d, so if the immediate is suitable
       * set the types to :UD so the instruction can be compacted.
       */
      if (is_compactable_immediate(brw_inst_imm_ud(brw, inst)) &&
          brw_inst_cond_modifier(brw, inst) == BRW_CONDITIONAL_NONE &&
          brw_inst_src0_reg_type(brw, inst) == BRW_HW_REG_TYPE_D &&
          brw_inst_dst_reg_type(brw, inst) == BRW_HW_REG_TYPE_D) {
         brw_inst_set_src0_reg_type(brw, inst, BRW_HW_REG_TYPE_UD);
         brw_inst_set_dst_reg_type(brw, inst, BRW_HW_REG_TYPE_UD);
      }
   } else {
      if (reg.address_mode == BRW_ADDRESS_DIRECT) {
         brw_inst_set_src0_da_reg_nr(brw, inst, reg.nr);
         if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_da1_subreg_nr(brw, inst, reg.subnr);
         } else {
            brw_inst_set_src0_da16_subreg_nr(brw, inst, reg.subnr / 16);
         }
      } else {
         brw_inst_set_src0_ia_subreg_nr(brw, inst, reg.subnr);

         if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
            brw_inst_set_src0_ia1_addr_imm(brw, inst, reg.dw1.bits.indirect_offset);
         } else {
            brw_inst_set_src0_ia16_addr_imm(brw, inst,
                                            reg.dw1.bits.indirect_offset);
         }
      }

      if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(brw, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src0_hstride(brw, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src0_width(brw, inst, BRW_WIDTH_1);
            brw_inst_set_src0_vstride(brw, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src0_hstride(brw, inst, reg.hstride);
            brw_inst_set_src0_width(brw, inst, reg.width);
            brw_inst_set_src0_vstride(brw, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src0_da16_swiz_x(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src0_da16_swiz_y(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src0_da16_swiz_z(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src0_da16_swiz_w(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_W));

         /* This is an oddity of the fact we're using the same
          * descriptions for registers in align_16 as align_1:
          */
         if (reg.vstride == BRW_VERTICAL_STRIDE_8)
            brw_inst_set_src0_vstride(brw, inst, BRW_VERTICAL_STRIDE_4);
         else
            brw_inst_set_src0_vstride(brw, inst, reg.vstride);
      }
   }
}
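
/* Example of the compaction-friendly retyping above (a sketch):
 *
 *    brw_MOV(p, dst, brw_imm_f(0.0f));
 *
 * stores the immediate as 0x00000000 and rewrites src0's type from :F to
 * :VF, making the MOV eligible for compaction; a nonzero float like 1.0f
 * (0x3f800000) keeps its :F type and cannot be compacted in src0.
 */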

void
brw_set_src1(struct brw_compile *p, brw_inst *inst, struct brw_reg reg)
{
   const struct brw_context *brw = p->brw;
   assert(reg.file != BRW_MESSAGE_REGISTER_FILE);

   if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)
      assert(reg.nr < 128);

   gen7_convert_mrf_to_grf(p, &reg);

   validate_reg(brw, inst, reg);

   brw_inst_set_src1_reg_file(brw, inst, reg.file);
   brw_inst_set_src1_reg_type(brw, inst,
                              brw_reg_type_to_hw_type(brw, reg.type, reg.file));
   brw_inst_set_src1_abs(brw, inst, reg.abs);
   brw_inst_set_src1_negate(brw, inst, reg.negate);

   /* Only src1 can be immediate in two-argument instructions.
    */
   assert(brw_inst_src0_reg_file(brw, inst) != BRW_IMMEDIATE_VALUE);

   if (reg.file == BRW_IMMEDIATE_VALUE) {
      brw_inst_set_imm_ud(brw, inst, reg.dw1.ud);
   } else {
      /* This is a hardware restriction, which may or may not be lifted
       * in the future:
       */
      assert (reg.address_mode == BRW_ADDRESS_DIRECT);
      /* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

      brw_inst_set_src1_da_reg_nr(brw, inst, reg.nr);
      if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
         brw_inst_set_src1_da1_subreg_nr(brw, inst, reg.subnr);
      } else {
         brw_inst_set_src1_da16_subreg_nr(brw, inst, reg.subnr / 16);
      }

      if (brw_inst_access_mode(brw, inst) == BRW_ALIGN_1) {
         if (reg.width == BRW_WIDTH_1 &&
             brw_inst_exec_size(brw, inst) == BRW_EXECUTE_1) {
            brw_inst_set_src1_hstride(brw, inst, BRW_HORIZONTAL_STRIDE_0);
            brw_inst_set_src1_width(brw, inst, BRW_WIDTH_1);
            brw_inst_set_src1_vstride(brw, inst, BRW_VERTICAL_STRIDE_0);
         } else {
            brw_inst_set_src1_hstride(brw, inst, reg.hstride);
            brw_inst_set_src1_width(brw, inst, reg.width);
            brw_inst_set_src1_vstride(brw, inst, reg.vstride);
         }
      } else {
         brw_inst_set_src1_da16_swiz_x(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_X));
         brw_inst_set_src1_da16_swiz_y(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Y));
         brw_inst_set_src1_da16_swiz_z(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Z));
         brw_inst_set_src1_da16_swiz_w(brw, inst,
            BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_W));

         /* This is an oddity of the fact we're using the same
          * descriptions for registers in align_16 as align_1:
          */
         if (reg.vstride == BRW_VERTICAL_STRIDE_8)
            brw_inst_set_src1_vstride(brw, inst, BRW_VERTICAL_STRIDE_4);
         else
            brw_inst_set_src1_vstride(brw, inst, reg.vstride);
      }
   }
}

/**
 * Set the Message Descriptor and Extended Message Descriptor fields
 * for SEND messages.
 *
 * \note This zeroes out the Function Control bits, so it must be called
 *       \b before filling out any message-specific data.  Callers can
 *       choose not to fill in irrelevant bits; they will be zero.
 */
static void
brw_set_message_descriptor(struct brw_compile *p,
                           brw_inst *inst,
                           enum brw_message_target sfid,
                           unsigned msg_length,
                           unsigned response_length,
                           bool header_present,
                           bool end_of_thread)
{
   struct brw_context *brw = p->brw;

   brw_set_src1(p, inst, brw_imm_d(0));

   /* For indirect sends, `inst` will not be the SEND/SENDC instruction
    * itself; instead, it will be a MOV/OR into the address register.
    *
    * In this case, we avoid setting the extended message descriptor bits,
    * since they go on the later SEND/SENDC instead and if set here would
    * instead clobber the conditionalmod bits.
    */
   unsigned opcode = brw_inst_opcode(brw, inst);
   if (opcode == BRW_OPCODE_SEND || opcode == BRW_OPCODE_SENDC) {
      brw_inst_set_sfid(brw, inst, sfid);
   }

   brw_inst_set_mlen(brw, inst, msg_length);
   brw_inst_set_rlen(brw, inst, response_length);
   brw_inst_set_eot(brw, inst, end_of_thread);

   if (brw->gen >= 5) {
      brw_inst_set_header_present(brw, inst, header_present);
   }
}
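
/* Because this helper clears the descriptor (via the brw_imm_d(0) written
 * to src1), message-specific setters must run after it, e.g.:
 *
 *    brw_set_message_descriptor(p, insn, BRW_SFID_URB, mlen, rlen,
 *                               true, false);
 *    brw_inst_set_urb_opcode(brw, insn, opcode);   <- fills Function Control
 */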

static void brw_set_math_message( struct brw_compile *p,
                                  brw_inst *inst,
                                  unsigned function,
                                  unsigned integer_type,
                                  bool low_precision,
                                  unsigned dataType )
{
   struct brw_context *brw = p->brw;
   unsigned msg_length;
   unsigned response_length;

   /* Infer message length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_POW:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
   case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      msg_length = 2;
      break;
   default:
      msg_length = 1;
      break;
   }

   /* Infer response length from the function */
   switch (function) {
   case BRW_MATH_FUNCTION_SINCOS:
   case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
      response_length = 2;
      break;
   default:
      response_length = 1;
      break;
   }

   brw_set_message_descriptor(p, inst, BRW_SFID_MATH,
                              msg_length, response_length, false, false);
   brw_inst_set_math_msg_function(brw, inst, function);
   brw_inst_set_math_msg_signed_int(brw, inst, integer_type);
   brw_inst_set_math_msg_precision(brw, inst, low_precision);
   brw_inst_set_math_msg_saturate(brw, inst, brw_inst_saturate(brw, inst));
   brw_inst_set_math_msg_data_type(brw, inst, dataType);
   brw_inst_set_saturate(brw, inst, 0);
}


static void brw_set_ff_sync_message(struct brw_compile *p,
                                    brw_inst *insn,
                                    bool allocate,
                                    unsigned response_length,
                                    bool end_of_thread)
{
   const struct brw_context *brw = p->brw;

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              1, response_length, true, end_of_thread);
   brw_inst_set_urb_opcode(brw, insn, 1); /* FF_SYNC */
   brw_inst_set_urb_allocate(brw, insn, allocate);
   /* The following fields are not used by FF_SYNC: */
   brw_inst_set_urb_global_offset(brw, insn, 0);
   brw_inst_set_urb_swizzle_control(brw, insn, 0);
   brw_inst_set_urb_used(brw, insn, 0);
   brw_inst_set_urb_complete(brw, insn, 0);
}

static void brw_set_urb_message( struct brw_compile *p,
                                 brw_inst *insn,
                                 enum brw_urb_write_flags flags,
                                 unsigned msg_length,
                                 unsigned response_length,
                                 unsigned offset,
                                 unsigned swizzle_control )
{
   struct brw_context *brw = p->brw;

   assert(brw->gen < 7 || swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
   assert(brw->gen < 7 || !(flags & BRW_URB_WRITE_ALLOCATE));
   assert(brw->gen >= 7 || !(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));

   brw_set_message_descriptor(p, insn, BRW_SFID_URB,
                              msg_length, response_length, true,
                              flags & BRW_URB_WRITE_EOT);

   if (flags & BRW_URB_WRITE_OWORD) {
      assert(msg_length == 2); /* header + one OWORD of data */
      brw_inst_set_urb_opcode(brw, insn, BRW_URB_OPCODE_WRITE_OWORD);
   } else {
      brw_inst_set_urb_opcode(brw, insn, BRW_URB_OPCODE_WRITE_HWORD);
   }

   brw_inst_set_urb_global_offset(brw, insn, offset);
   brw_inst_set_urb_swizzle_control(brw, insn, swizzle_control);

   if (brw->gen < 8) {
      brw_inst_set_urb_complete(brw, insn, !!(flags & BRW_URB_WRITE_COMPLETE));
   }

   if (brw->gen < 7) {
      brw_inst_set_urb_allocate(brw, insn, !!(flags & BRW_URB_WRITE_ALLOCATE));
      brw_inst_set_urb_used(brw, insn, !(flags & BRW_URB_WRITE_UNUSED));
   } else {
      brw_inst_set_urb_per_slot_offset(brw, insn,
         !!(flags & BRW_URB_WRITE_PER_SLOT_OFFSET));
   }
}

void
brw_set_dp_write_message(struct brw_compile *p,
                         brw_inst *insn,
                         unsigned binding_table_index,
                         unsigned msg_control,
                         unsigned msg_type,
                         unsigned msg_length,
                         bool header_present,
                         unsigned last_render_target,
                         unsigned response_length,
                         unsigned end_of_thread,
                         unsigned send_commit_msg)
{
   struct brw_context *brw = p->brw;
   unsigned sfid;

   if (brw->gen >= 7) {
      /* Use the Render Cache for RT writes; otherwise use the Data Cache */
      if (msg_type == GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE)
         sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
      else
         sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
   } else if (brw->gen == 6) {
      /* Use the render cache for all write messages. */
      sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
   } else {
      sfid = BRW_SFID_DATAPORT_WRITE;
   }

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, end_of_thread);

   brw_inst_set_binding_table_index(brw, insn, binding_table_index);
   brw_inst_set_dp_write_msg_type(brw, insn, msg_type);
   brw_inst_set_dp_write_msg_control(brw, insn, msg_control);
   brw_inst_set_rt_last(brw, insn, last_render_target);
   if (brw->gen < 7) {
      brw_inst_set_dp_write_commit(brw, insn, send_commit_msg);
   }
}

void
brw_set_dp_read_message(struct brw_compile *p,
                        brw_inst *insn,
                        unsigned binding_table_index,
                        unsigned msg_control,
                        unsigned msg_type,
                        unsigned target_cache,
                        unsigned msg_length,
                        bool header_present,
                        unsigned response_length)
{
   struct brw_context *brw = p->brw;
   unsigned sfid;

   if (brw->gen >= 7) {
      sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
   } else if (brw->gen == 6) {
      if (target_cache == BRW_DATAPORT_READ_TARGET_RENDER_CACHE)
         sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
      else
         sfid = GEN6_SFID_DATAPORT_SAMPLER_CACHE;
   } else {
      sfid = BRW_SFID_DATAPORT_READ;
   }

   brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
                              header_present, false);

   brw_inst_set_binding_table_index(brw, insn, binding_table_index);
   brw_inst_set_dp_read_msg_type(brw, insn, msg_type);
   brw_inst_set_dp_read_msg_control(brw, insn, msg_control);
   if (brw->gen < 6)
      brw_inst_set_dp_read_target_cache(brw, insn, target_cache);
}

void
brw_set_sampler_message(struct brw_compile *p,
                        brw_inst *inst,
                        unsigned binding_table_index,
                        unsigned sampler,
                        unsigned msg_type,
                        unsigned response_length,
                        unsigned msg_length,
                        unsigned header_present,
                        unsigned simd_mode,
                        unsigned return_format)
{
   struct brw_context *brw = p->brw;

   brw_set_message_descriptor(p, inst, BRW_SFID_SAMPLER, msg_length,
                              response_length, header_present, false);

   brw_inst_set_binding_table_index(brw, inst, binding_table_index);
   brw_inst_set_sampler(brw, inst, sampler);
   brw_inst_set_sampler_msg_type(brw, inst, msg_type);
   if (brw->gen >= 5) {
      brw_inst_set_sampler_simd_mode(brw, inst, simd_mode);
   } else if (brw->gen == 4 && !brw->is_g4x) {
      brw_inst_set_sampler_return_format(brw, inst, return_format);
   }
}

void brw_set_indirect_send_descriptor(struct brw_compile *p,
                                      brw_inst *insn,
                                      unsigned sfid,
                                      struct brw_reg descriptor)
{
   /* Only a0.0 may be used as SEND's descriptor operand. */
   assert(descriptor.file == BRW_ARCHITECTURE_REGISTER_FILE);
   assert(descriptor.type == BRW_REGISTER_TYPE_UD);
   assert(descriptor.nr == BRW_ARF_ADDRESS);
   assert(descriptor.subnr == 0);

   brw_set_message_descriptor(p, insn, sfid, 0, 0, false, false);
   brw_set_src1(p, insn, descriptor);
}

static void
gen7_set_dp_scratch_message(struct brw_compile *p,
                            brw_inst *inst,
                            bool write,
                            bool dword,
                            bool invalidate_after_read,
                            unsigned num_regs,
                            unsigned addr_offset,
                            unsigned mlen,
                            unsigned rlen,
                            bool header_present)
{
   const struct brw_context *brw = p->brw;
   assert(num_regs == 1 || num_regs == 2 || num_regs == 4 ||
          (brw->gen >= 8 && num_regs == 8));
   brw_set_message_descriptor(p, inst, GEN7_SFID_DATAPORT_DATA_CACHE,
                              mlen, rlen, header_present, false);
   brw_inst_set_dp_category(brw, inst, 1); /* Scratch Block Read/Write msgs */
   brw_inst_set_scratch_read_write(brw, inst, write);
   brw_inst_set_scratch_type(brw, inst, dword);
   brw_inst_set_scratch_invalidate_after_read(brw, inst, invalidate_after_read);
   brw_inst_set_scratch_block_size(brw, inst, ffs(num_regs) - 1);
   brw_inst_set_scratch_addr_offset(brw, inst, addr_offset);
}
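
/* The block size field is log2 of the register count, hence the ffs()-1:
 * num_regs of 1, 2, 4 (or 8 on Gen8+) encodes as 0, 1, 2, 3 respectively.
 */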

#define next_insn brw_next_insn
brw_inst *
brw_next_insn(struct brw_compile *p, unsigned opcode)
{
   const struct brw_context *brw = p->brw;
   brw_inst *insn;

   if (p->nr_insn + 1 > p->store_size) {
      p->store_size <<= 1;
      p->store = reralloc(p->mem_ctx, p->store, brw_inst, p->store_size);
   }

   p->next_insn_offset += 16;
   insn = &p->store[p->nr_insn++];
   memcpy(insn, p->current, sizeof(*insn));

   brw_inst_set_opcode(brw, insn, opcode);
   return insn;
}
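
/* Note: a brw_inst is 16 bytes, which is why next_insn_offset advances by
 * 16.  Also, since reralloc can move p->store, instruction pointers must
 * not be cached across calls to brw_next_insn() -- this is why the if/loop
 * stacks below store indices rather than pointers, and why brw_ENDIF is
 * careful about call ordering.
 */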

static brw_inst *
brw_alu1(struct brw_compile *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src);
   return insn;
}

static brw_inst *
brw_alu2(struct brw_compile *p, unsigned opcode,
         struct brw_reg dest, struct brw_reg src0, struct brw_reg src1)
{
   brw_inst *insn = next_insn(p, opcode);
   brw_set_dest(p, insn, dest);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);
   return insn;
}

static int
get_3src_subreg_nr(struct brw_reg reg)
{
   if (reg.vstride == BRW_VERTICAL_STRIDE_0) {
      assert(brw_is_single_value_swizzle(reg.dw1.bits.swizzle));
      return reg.subnr / 4 + BRW_GET_SWZ(reg.dw1.bits.swizzle, 0);
   } else {
      return reg.subnr / 4;
   }
}

static brw_inst *
brw_alu3(struct brw_compile *p, unsigned opcode, struct brw_reg dest,
         struct brw_reg src0, struct brw_reg src1, struct brw_reg src2)
{
   struct brw_context *brw = p->brw;
   brw_inst *inst = next_insn(p, opcode);

   gen7_convert_mrf_to_grf(p, &dest);

   assert(brw_inst_access_mode(brw, inst) == BRW_ALIGN_16);

   assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
          dest.file == BRW_MESSAGE_REGISTER_FILE);
   assert(dest.nr < 128);
   assert(dest.address_mode == BRW_ADDRESS_DIRECT);
   assert(dest.type == BRW_REGISTER_TYPE_F ||
          dest.type == BRW_REGISTER_TYPE_D ||
          dest.type == BRW_REGISTER_TYPE_UD);
   if (brw->gen == 6) {
      brw_inst_set_3src_dst_reg_file(brw, inst,
                                     dest.file == BRW_MESSAGE_REGISTER_FILE);
   }
   brw_inst_set_3src_dst_reg_nr(brw, inst, dest.nr);
   brw_inst_set_3src_dst_subreg_nr(brw, inst, dest.subnr / 16);
   brw_inst_set_3src_dst_writemask(brw, inst, dest.dw1.bits.writemask);
   guess_execution_size(p, inst, dest);

   assert(src0.file == BRW_GENERAL_REGISTER_FILE);
   assert(src0.address_mode == BRW_ADDRESS_DIRECT);
   assert(src0.nr < 128);
   brw_inst_set_3src_src0_swizzle(brw, inst, src0.dw1.bits.swizzle);
   brw_inst_set_3src_src0_subreg_nr(brw, inst, get_3src_subreg_nr(src0));
   brw_inst_set_3src_src0_reg_nr(brw, inst, src0.nr);
   brw_inst_set_3src_src0_abs(brw, inst, src0.abs);
   brw_inst_set_3src_src0_negate(brw, inst, src0.negate);
   brw_inst_set_3src_src0_rep_ctrl(brw, inst,
                                   src0.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src1.file == BRW_GENERAL_REGISTER_FILE);
   assert(src1.address_mode == BRW_ADDRESS_DIRECT);
   assert(src1.nr < 128);
   brw_inst_set_3src_src1_swizzle(brw, inst, src1.dw1.bits.swizzle);
   brw_inst_set_3src_src1_subreg_nr(brw, inst, get_3src_subreg_nr(src1));
   brw_inst_set_3src_src1_reg_nr(brw, inst, src1.nr);
   brw_inst_set_3src_src1_abs(brw, inst, src1.abs);
   brw_inst_set_3src_src1_negate(brw, inst, src1.negate);
   brw_inst_set_3src_src1_rep_ctrl(brw, inst,
                                   src1.vstride == BRW_VERTICAL_STRIDE_0);

   assert(src2.file == BRW_GENERAL_REGISTER_FILE);
   assert(src2.address_mode == BRW_ADDRESS_DIRECT);
   assert(src2.nr < 128);
   brw_inst_set_3src_src2_swizzle(brw, inst, src2.dw1.bits.swizzle);
   brw_inst_set_3src_src2_subreg_nr(brw, inst, get_3src_subreg_nr(src2));
   brw_inst_set_3src_src2_reg_nr(brw, inst, src2.nr);
   brw_inst_set_3src_src2_abs(brw, inst, src2.abs);
   brw_inst_set_3src_src2_negate(brw, inst, src2.negate);
   brw_inst_set_3src_src2_rep_ctrl(brw, inst,
                                   src2.vstride == BRW_VERTICAL_STRIDE_0);

   if (brw->gen >= 7) {
      /* Set both the source and destination types based on dest.type,
       * ignoring the source register types.  The MAD and LRP emitters ensure
       * that all four types are float.  The BFE and BFI2 emitters, however,
       * may send us mixed D and UD types and want us to ignore that and use
       * the destination type.
       */
      switch (dest.type) {
      case BRW_REGISTER_TYPE_F:
         brw_inst_set_3src_src_type(brw, inst, BRW_3SRC_TYPE_F);
         brw_inst_set_3src_dst_type(brw, inst, BRW_3SRC_TYPE_F);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_inst_set_3src_src_type(brw, inst, BRW_3SRC_TYPE_D);
         brw_inst_set_3src_dst_type(brw, inst, BRW_3SRC_TYPE_D);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_inst_set_3src_src_type(brw, inst, BRW_3SRC_TYPE_UD);
         brw_inst_set_3src_dst_type(brw, inst, BRW_3SRC_TYPE_UD);
         break;
      }
   }

   return inst;
}


/***********************************************************************
 * Convenience routines.
 */
#define ALU1(OP)                                          \
brw_inst *brw_##OP(struct brw_compile *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0)                   \
{                                                         \
   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);       \
}

#define ALU2(OP)                                          \
brw_inst *brw_##OP(struct brw_compile *p,                 \
                   struct brw_reg dest,                   \
                   struct brw_reg src0,                   \
                   struct brw_reg src1)                   \
{                                                         \
   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1); \
}

#define ALU3(OP)                                                \
brw_inst *brw_##OP(struct brw_compile *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

#define ALU3F(OP)                                               \
brw_inst *brw_##OP(struct brw_compile *p,                       \
                   struct brw_reg dest,                         \
                   struct brw_reg src0,                         \
                   struct brw_reg src1,                         \
                   struct brw_reg src2)                         \
{                                                               \
   assert(dest.type == BRW_REGISTER_TYPE_F);                    \
   assert(src0.type == BRW_REGISTER_TYPE_F);                    \
   assert(src1.type == BRW_REGISTER_TYPE_F);                    \
   assert(src2.type == BRW_REGISTER_TYPE_F);                    \
   return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}

/* Rounding operations (other than RNDD) require two instructions - the first
 * stores a rounded value (possibly the wrong way) in the dest register, but
 * also sets a per-channel "increment bit" in the flag register.  A predicated
 * add of 1.0 fixes dest to contain the desired result.
 *
 * Sandybridge and later appear to round correctly without an ADD.
 */
#define ROUND(OP)                                                  \
void brw_##OP(struct brw_compile *p,                               \
              struct brw_reg dest,                                 \
              struct brw_reg src)                                  \
{                                                                  \
   struct brw_context *brw = p->brw;                               \
   brw_inst *rnd, *add;                                            \
   rnd = next_insn(p, BRW_OPCODE_##OP);                            \
   brw_set_dest(p, rnd, dest);                                     \
   brw_set_src0(p, rnd, src);                                      \
                                                                   \
   if (brw->gen < 6) {                                             \
      /* turn on round-increments */                               \
      brw_inst_set_cond_modifier(brw, rnd, BRW_CONDITIONAL_R);     \
      add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));               \
      brw_inst_set_pred_control(brw, add, BRW_PREDICATE_NORMAL);   \
   }                                                               \
}
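
/* Sketch of the pre-gen6 sequence this expands to, e.g. for brw_RNDZ
 * (illustrative assembly notation):
 *
 *    rndz.r dst src           sets the per-channel round-increment bits
 *    (+f0) add dst dst 1.0F   predicated fix-up using those bits
 */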


ALU1(MOV)
ALU2(SEL)
ALU1(NOT)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(SHR)
ALU2(SHL)
ALU2(ASR)
ALU1(FRC)
ALU1(RNDD)
ALU2(MAC)
ALU2(MACH)
ALU1(LZD)
ALU2(DP4)
ALU2(DPH)
ALU2(DP3)
ALU2(DP2)
ALU2(LINE)
ALU2(PLN)
ALU3F(MAD)
ALU3F(LRP)
ALU1(BFREV)
ALU3(BFE)
ALU2(BFI1)
ALU3(BFI2)
ALU1(FBH)
ALU1(FBL)
ALU1(CBIT)
ALU2(ADDC)
ALU2(SUBB)

ROUND(RNDZ)
ROUND(RNDE)


brw_inst *
brw_ADD(struct brw_compile *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.2.2: add */
   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
}

brw_inst *
brw_AVG(struct brw_compile *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   assert(dest.type == src0.type);
   assert(src0.type == src1.type);
   switch (src0.type) {
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_UD:
      break;
   default:
      unreachable("Bad type for brw_AVG");
   }

   return brw_alu2(p, BRW_OPCODE_AVG, dest, src0, src1);
}

brw_inst *
brw_MUL(struct brw_compile *p, struct brw_reg dest,
        struct brw_reg src0, struct brw_reg src1)
{
   /* 6.32.38: mul */
   if (src0.type == BRW_REGISTER_TYPE_D ||
       src0.type == BRW_REGISTER_TYPE_UD ||
       src1.type == BRW_REGISTER_TYPE_D ||
       src1.type == BRW_REGISTER_TYPE_UD) {
      assert(dest.type != BRW_REGISTER_TYPE_F);
   }

   if (src0.type == BRW_REGISTER_TYPE_F ||
       (src0.file == BRW_IMMEDIATE_VALUE &&
        src0.type == BRW_REGISTER_TYPE_VF)) {
      assert(src1.type != BRW_REGISTER_TYPE_UD);
      assert(src1.type != BRW_REGISTER_TYPE_D);
   }

   if (src1.type == BRW_REGISTER_TYPE_F ||
       (src1.file == BRW_IMMEDIATE_VALUE &&
        src1.type == BRW_REGISTER_TYPE_VF)) {
      assert(src0.type != BRW_REGISTER_TYPE_UD);
      assert(src0.type != BRW_REGISTER_TYPE_D);
   }

   assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src0.nr != BRW_ARF_ACCUMULATOR);
   assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
          src1.nr != BRW_ARF_ACCUMULATOR);

   return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
}

brw_inst *
brw_F32TO16(struct brw_compile *p, struct brw_reg dst, struct brw_reg src)
{
   const struct brw_context *brw = p->brw;
   bool align16 = brw_inst_access_mode(brw, p->current) == BRW_ALIGN_16;

   if (align16) {
      assert(dst.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(dst.type == BRW_REGISTER_TYPE_W ||
             dst.type == BRW_REGISTER_TYPE_UW ||
             dst.type == BRW_REGISTER_TYPE_HF);
   }

   if (brw->gen >= 8) {
      if (align16) {
         /* Emulate the Gen7 zeroing bug (see comments in vec4_visitor's
          * emit_pack_half_2x16 method.)
          */
         brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
      }
      return brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_HF), src);
   } else {
      assert(brw->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F32TO16, dst, src);
   }
}

brw_inst *
brw_F16TO32(struct brw_compile *p, struct brw_reg dst, struct brw_reg src)
{
   const struct brw_context *brw = p->brw;
   bool align16 = brw_inst_access_mode(brw, p->current) == BRW_ALIGN_16;

   if (align16) {
      assert(src.type == BRW_REGISTER_TYPE_UD);
   } else {
      assert(src.type == BRW_REGISTER_TYPE_W ||
             src.type == BRW_REGISTER_TYPE_UW ||
             src.type == BRW_REGISTER_TYPE_HF);
   }

   if (brw->gen >= 8) {
      return brw_MOV(p, dst, retype(src, BRW_REGISTER_TYPE_HF));
   } else {
      assert(brw->gen == 7);
      return brw_alu1(p, BRW_OPCODE_F16TO32, dst, src);
   }
}


void brw_NOP(struct brw_compile *p)
{
   brw_inst *insn = next_insn(p, BRW_OPCODE_NOP);
   brw_set_dest(p, insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
   brw_set_src0(p, insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
   brw_set_src1(p, insn, brw_imm_ud(0x0));
}



/***********************************************************************
 * Comparisons, if/else/endif
 */

brw_inst *
brw_JMPI(struct brw_compile *p, struct brw_reg index,
         unsigned predicate_control)
{
   const struct brw_context *brw = p->brw;
   struct brw_reg ip = brw_ip_reg();
   brw_inst *inst = brw_alu2(p, BRW_OPCODE_JMPI, ip, ip, index);

   brw_inst_set_exec_size(brw, inst, BRW_EXECUTE_2);
   brw_inst_set_qtr_control(brw, inst, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(brw, inst, BRW_MASK_DISABLE);
   brw_inst_set_pred_control(brw, inst, predicate_control);

   return inst;
}

static void
push_if_stack(struct brw_compile *p, brw_inst *inst)
{
   p->if_stack[p->if_stack_depth] = inst - p->store;

   p->if_stack_depth++;
   if (p->if_stack_array_size <= p->if_stack_depth) {
      p->if_stack_array_size *= 2;
      p->if_stack = reralloc(p->mem_ctx, p->if_stack, int,
                             p->if_stack_array_size);
   }
}

static brw_inst *
pop_if_stack(struct brw_compile *p)
{
   p->if_stack_depth--;
   return &p->store[p->if_stack[p->if_stack_depth]];
}

static void
push_loop_stack(struct brw_compile *p, brw_inst *inst)
{
   /* +1 because if_depth_in_loop[p->loop_stack_depth] is also written
    * after the increment below.
    */
   if (p->loop_stack_array_size <= p->loop_stack_depth + 1) {
      p->loop_stack_array_size *= 2;
      p->loop_stack = reralloc(p->mem_ctx, p->loop_stack, int,
                               p->loop_stack_array_size);
      p->if_depth_in_loop = reralloc(p->mem_ctx, p->if_depth_in_loop, int,
                                     p->loop_stack_array_size);
   }

   p->loop_stack[p->loop_stack_depth] = inst - p->store;
   p->loop_stack_depth++;
   p->if_depth_in_loop[p->loop_stack_depth] = 0;
}

static brw_inst *
get_inner_do_insn(struct brw_compile *p)
{
   return &p->store[p->loop_stack[p->loop_stack_depth - 1]];
}

/* EU takes the value from the flag register and pushes it onto some
 * sort of a stack (presumably merging with any flag value already on
 * the stack).  Within an if block, the flags at the top of the stack
 * control execution on each channel of the unit, e.g. on each of the
 * 16 pixel values in our wm programs.
 *
 * When the matching 'else' instruction is reached (presumably by
 * countdown of the instruction count patched in by our ELSE/ENDIF
 * functions), the relevant flags are inverted.
 *
 * When the matching 'endif' instruction is reached, the flags are
 * popped off.  If the stack is now empty, normal execution resumes.
 */
brw_inst *
brw_IF(struct brw_compile *p, unsigned execute_size)
{
   struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   /* Override the defaults for this instruction:
    */
   if (brw->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (brw->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(brw, insn, 0);
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
   } else if (brw->gen == 7) {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src1(p, insn, brw_imm_ud(0));
      brw_inst_set_jip(brw, insn, 0);
      brw_inst_set_uip(brw, insn, 0);
   } else {
      brw_set_dest(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(brw, insn, 0);
      brw_inst_set_uip(brw, insn, 0);
   }

   brw_inst_set_exec_size(brw, insn, execute_size);
   brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_pred_control(brw, insn, BRW_PREDICATE_NORMAL);
   brw_inst_set_mask_control(brw, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && brw->gen < 6)
      brw_inst_set_thread_control(brw, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
   p->if_depth_in_loop[p->loop_stack_depth]++;
   return insn;
}

/* This function is only used for gen6-style IF instructions with an
 * embedded comparison (conditional modifier).  It is not used on gen7.
 */
brw_inst *
gen6_IF(struct brw_compile *p, enum brw_conditional_mod conditional,
        struct brw_reg src0, struct brw_reg src1)
{
   const struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_IF);

   brw_set_dest(p, insn, brw_imm_w(0));
   brw_inst_set_exec_size(brw, insn, p->compressed ? BRW_EXECUTE_16
                                                   : BRW_EXECUTE_8);
   brw_inst_set_gen6_jump_count(brw, insn, 0);
   brw_set_src0(p, insn, src0);
   brw_set_src1(p, insn, src1);

   assert(brw_inst_qtr_control(brw, insn) == BRW_COMPRESSION_NONE);
   assert(brw_inst_pred_control(brw, insn) == BRW_PREDICATE_NONE);
   brw_inst_set_cond_modifier(brw, insn, conditional);

   push_if_stack(p, insn);
   return insn;
}

/**
 * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
 */
static void
convert_IF_ELSE_to_ADD(struct brw_compile *p,
                       brw_inst *if_inst, brw_inst *else_inst)
{
   const struct brw_context *brw = p->brw;

   /* The next instruction (where the ENDIF would be, if it existed) */
   brw_inst *next_inst = &p->store[p->nr_insn];

   assert(p->single_program_flow);
   assert(if_inst != NULL && brw_inst_opcode(brw, if_inst) == BRW_OPCODE_IF);
   assert(else_inst == NULL || brw_inst_opcode(brw, else_inst) == BRW_OPCODE_ELSE);
   assert(brw_inst_exec_size(brw, if_inst) == BRW_EXECUTE_1);

   /* Convert IF to an ADD instruction that moves the instruction pointer
    * to the first instruction of the ELSE block.  If there is no ELSE
    * block, point to where ENDIF would be.  Reverse the predicate.
    *
    * There's no need to execute an ENDIF since we don't need to do any
    * stack operations, and if we're currently executing, we just want to
    * continue normally.
    */
   brw_inst_set_opcode(brw, if_inst, BRW_OPCODE_ADD);
   brw_inst_set_pred_inv(brw, if_inst, true);

   if (else_inst != NULL) {
      /* Convert ELSE to an ADD instruction that points where the ENDIF
       * would be.
       */
      brw_inst_set_opcode(brw, else_inst, BRW_OPCODE_ADD);

      brw_inst_set_imm_ud(brw, if_inst, (else_inst - if_inst + 1) * 16);
      brw_inst_set_imm_ud(brw, else_inst, (next_inst - else_inst) * 16);
   } else {
      brw_inst_set_imm_ud(brw, if_inst, (next_inst - if_inst) * 16);
   }
}
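
/* The immediates above are byte distances: one uncompacted instruction is
 * 16 bytes.  E.g. with the IF at slot 0, the ELSE at slot 3, and the
 * would-be ENDIF at slot 6, the IF-turned-ADD gets (3 - 0 + 1) * 16 = 64
 * and the ELSE-turned-ADD gets (6 - 3) * 16 = 48.
 */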

/**
 * Patch IF and ELSE instructions with appropriate jump targets.
 */
static void
patch_IF_ELSE(struct brw_compile *p,
              brw_inst *if_inst, brw_inst *else_inst, brw_inst *endif_inst)
{
   struct brw_context *brw = p->brw;

   /* We shouldn't be patching IF and ELSE instructions in single program flow
    * mode when gen < 6, because in single program flow mode on those
    * platforms, we convert flow control instructions to conditional ADDs that
    * operate on IP (see brw_ENDIF).
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we do patch IF and ELSE
    * instructions in single program flow mode on those platforms.
    */
   if (brw->gen < 6)
      assert(!p->single_program_flow);

   assert(if_inst != NULL && brw_inst_opcode(brw, if_inst) == BRW_OPCODE_IF);
   assert(endif_inst != NULL);
   assert(else_inst == NULL || brw_inst_opcode(brw, else_inst) == BRW_OPCODE_ELSE);

   unsigned br = brw_jump_scale(brw);

   assert(brw_inst_opcode(brw, endif_inst) == BRW_OPCODE_ENDIF);
   brw_inst_set_exec_size(brw, endif_inst, brw_inst_exec_size(brw, if_inst));

   if (else_inst == NULL) {
      /* Patch IF -> ENDIF */
      if (brw->gen < 6) {
         /* Turn it into an IFF, which means no mask stack operations for
          * all-false and jumping past the ENDIF.
          */
         brw_inst_set_opcode(brw, if_inst, BRW_OPCODE_IFF);
         brw_inst_set_gen4_jump_count(brw, if_inst,
                                      br * (endif_inst - if_inst + 1));
         brw_inst_set_gen4_pop_count(brw, if_inst, 0);
      } else if (brw->gen == 6) {
         /* As of gen6, there is no IFF and IF must point to the ENDIF. */
         brw_inst_set_gen6_jump_count(brw, if_inst, br*(endif_inst - if_inst));
      } else {
         brw_inst_set_uip(brw, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(brw, if_inst, br * (endif_inst - if_inst));
      }
   } else {
      brw_inst_set_exec_size(brw, else_inst, brw_inst_exec_size(brw, if_inst));

      /* Patch IF -> ELSE */
      if (brw->gen < 6) {
         brw_inst_set_gen4_jump_count(brw, if_inst,
                                      br * (else_inst - if_inst));
         brw_inst_set_gen4_pop_count(brw, if_inst, 0);
      } else if (brw->gen == 6) {
         brw_inst_set_gen6_jump_count(brw, if_inst,
                                      br * (else_inst - if_inst + 1));
      }

      /* Patch ELSE -> ENDIF */
      if (brw->gen < 6) {
         /* BRW_OPCODE_ELSE pre-gen6 should point just past the
          * matching ENDIF.
          */
         brw_inst_set_gen4_jump_count(brw, else_inst,
                                      br * (endif_inst - else_inst + 1));
         brw_inst_set_gen4_pop_count(brw, else_inst, 1);
      } else if (brw->gen == 6) {
         /* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
         brw_inst_set_gen6_jump_count(brw, else_inst,
                                      br * (endif_inst - else_inst));
      } else {
         /* The IF instruction's JIP should point just past the ELSE */
         brw_inst_set_jip(brw, if_inst, br * (else_inst - if_inst + 1));
         /* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
         brw_inst_set_uip(brw, if_inst, br * (endif_inst - if_inst));
         brw_inst_set_jip(brw, else_inst, br * (endif_inst - else_inst));
         if (brw->gen >= 8) {
            /* Since we don't set branch_ctrl, the ELSE's JIP and UIP both
             * should point to ENDIF.
             */
            brw_inst_set_uip(brw, else_inst, br * (endif_inst - else_inst));
         }
      }
   }
}
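
/* Worked example of the jump scaling: brw_jump_scale() is 1 on Gen4
 * (jumps counted in whole 128-bit instructions), 2 on Gen5-7 (64-bit
 * chunks, to allow for compacted instructions) and 16 on Gen8+ (bytes).
 * So a Gen7 IF at slot 0 with its ENDIF at slot 5 and no ELSE gets
 * JIP = UIP = 2 * (5 - 0) = 10.
 */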

void
brw_ELSE(struct brw_compile *p)
{
   struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_ELSE);

   if (brw->gen < 6) {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else if (brw->gen == 6) {
      brw_set_dest(p, insn, brw_imm_w(0));
      brw_inst_set_gen6_jump_count(brw, insn, 0);
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   } else if (brw->gen == 7) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_ud(0));
      brw_inst_set_jip(brw, insn, 0);
      brw_inst_set_uip(brw, insn, 0);
   } else {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0));
      brw_inst_set_jip(brw, insn, 0);
      brw_inst_set_uip(brw, insn, 0);
   }

   brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_mask_control(brw, insn, BRW_MASK_ENABLE);
   if (!p->single_program_flow && brw->gen < 6)
      brw_inst_set_thread_control(brw, insn, BRW_THREAD_SWITCH);

   push_if_stack(p, insn);
}

void
brw_ENDIF(struct brw_compile *p)
{
   struct brw_context *brw = p->brw;
   brw_inst *insn = NULL;
   brw_inst *else_inst = NULL;
   brw_inst *if_inst = NULL;
   brw_inst *tmp;
   bool emit_endif = true;

   /* In single program flow mode, we can express IF and ELSE instructions
    * equivalently as ADD instructions that operate on IP.  On platforms prior
    * to Gen6, flow control instructions cause an implied thread switch, so
    * this is a significant savings.
    *
    * However, on Gen6, writing to IP doesn't work in single program flow mode
    * (see the SandyBridge PRM, Volume 4 part 2, p79: "When SPF is ON, IP may
    * not be updated by non-flow control instructions.").  And on later
    * platforms, there is no significant benefit to converting control flow
    * instructions to conditional ADDs.  So we only do this trick on Gen4 and
    * Gen5.
    */
   if (brw->gen < 6 && p->single_program_flow)
      emit_endif = false;

1543 * A single next_insn() may change the base adress of instruction store
1544 * memory(p->store), so call it first before referencing the instruction
1545 * store pointer from an index
1546 */
1547 if (emit_endif)
1548 insn = next_insn(p, BRW_OPCODE_ENDIF);
1549
1550 /* Pop the IF and (optional) ELSE instructions from the stack */
1551 p->if_depth_in_loop[p->loop_stack_depth]--;
1552 tmp = pop_if_stack(p);
1553 if (brw_inst_opcode(brw, tmp) == BRW_OPCODE_ELSE) {
1554 else_inst = tmp;
1555 tmp = pop_if_stack(p);
1556 }
1557 if_inst = tmp;
1558
1559 if (!emit_endif) {
1560 /* ENDIF is useless; don't bother emitting it. */
1561 convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
1562 return;
1563 }
1564
1565 if (brw->gen < 6) {
1566 brw_set_dest(p, insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
1567 brw_set_src0(p, insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
1568 brw_set_src1(p, insn, brw_imm_d(0x0));
1569 } else if (brw->gen == 6) {
1570 brw_set_dest(p, insn, brw_imm_w(0));
1571 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1572 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1573 } else if (brw->gen == 7) {
1574 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1575 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1576 brw_set_src1(p, insn, brw_imm_ud(0));
1577 } else {
1578 brw_set_src0(p, insn, brw_imm_d(0));
1579 }
1580
1581 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
1582 brw_inst_set_mask_control(brw, insn, BRW_MASK_ENABLE);
1583 if (brw->gen < 6)
1584 brw_inst_set_thread_control(brw, insn, BRW_THREAD_SWITCH);
1585
1586 /* Also pop item off the stack in the endif instruction: */
1587 if (brw->gen < 6) {
1588 brw_inst_set_gen4_jump_count(brw, insn, 0);
1589 brw_inst_set_gen4_pop_count(brw, insn, 1);
1590 } else if (brw->gen == 6) {
1591 brw_inst_set_gen6_jump_count(brw, insn, 2);
1592 } else {
1593 brw_inst_set_jip(brw, insn, 2);
1594 }
1595 patch_IF_ELSE(p, if_inst, else_inst, insn);
1596 }

brw_inst *
brw_BREAK(struct brw_compile *p)
{
   struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_BREAK);
   if (brw->gen >= 8) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else if (brw->gen >= 6) {
      brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_dest(p, insn, brw_ip_reg());
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
      brw_inst_set_gen4_pop_count(brw, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(brw, insn, p->compressed ? BRW_EXECUTE_16
                                                   : BRW_EXECUTE_8);

   return insn;
}

brw_inst *
brw_CONT(struct brw_compile *p)
{
   const struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_CONTINUE);
   brw_set_dest(p, insn, brw_ip_reg());
   if (brw->gen >= 8) {
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_src0(p, insn, brw_ip_reg());
      brw_set_src1(p, insn, brw_imm_d(0x0));
   }

   if (brw->gen < 6) {
      brw_inst_set_gen4_pop_count(brw, insn,
                                  p->if_depth_in_loop[p->loop_stack_depth]);
   }
   brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
   brw_inst_set_exec_size(brw, insn, p->compressed ? BRW_EXECUTE_16
                                                   : BRW_EXECUTE_8);
   return insn;
}

brw_inst *
gen6_HALT(struct brw_compile *p)
{
   const struct brw_context *brw = p->brw;
   brw_inst *insn;

   insn = next_insn(p, BRW_OPCODE_HALT);
   brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   if (brw->gen >= 8) {
      brw_set_src0(p, insn, brw_imm_d(0x0));
   } else {
      brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
      brw_set_src1(p, insn, brw_imm_d(0x0)); /* UIP and JIP, updated later. */
   }

   if (p->compressed) {
      brw_inst_set_exec_size(brw, insn, BRW_EXECUTE_16);
   } else {
      brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
      brw_inst_set_exec_size(brw, insn, BRW_EXECUTE_8);
   }
   return insn;
}
1674
1675 /* DO/WHILE loop:
1676 *
1677 * The DO/WHILE is just an unterminated loop -- break or continue are
1678 * used for control within the loop. There are a few ways it can be
1679 * done.
1680 *
1681 * For uniform control flow, the WHILE is just a jump, so we emit an
1682 * ADD ip, ip, jip and no DO instruction.
1683 *
1684 * For non-uniform control flow pre-gen6, there's a DO instruction to
1685 * push the mask, and a WHILE to jump back, and BREAK to get out and
1686 * pop the mask.
1687 *
1688 * For gen6, there's no more mask stack, so no need for DO. WHILE
1689 * just points back to the first instruction of the loop.
1690 */
1691 brw_inst *
1692 brw_DO(struct brw_compile *p, unsigned execute_size)
1693 {
1694 struct brw_context *brw = p->brw;
1695
1696 if (brw->gen >= 6 || p->single_program_flow) {
1697 push_loop_stack(p, &p->store[p->nr_insn]);
1698 return &p->store[p->nr_insn];
1699 } else {
1700 brw_inst *insn = next_insn(p, BRW_OPCODE_DO);
1701
1702 push_loop_stack(p, insn);
1703
1704 /* Override the defaults for this instruction:
1705 */
1706 brw_set_dest(p, insn, brw_null_reg());
1707 brw_set_src0(p, insn, brw_null_reg());
1708 brw_set_src1(p, insn, brw_null_reg());
1709
1710 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
1711 brw_inst_set_exec_size(brw, insn, execute_size);
1712 brw_inst_set_pred_control(brw, insn, BRW_PREDICATE_NONE);
1713
1714 return insn;
1715 }
1716 }
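
/* A hedged usage sketch of the loop scheme described above (hypothetical
 * loop body; not actual driver code):
 *
 *    brw_DO(p, BRW_EXECUTE_8);
 *    ... loop body, possibly containing brw_BREAK(p) / brw_CONT(p) ...
 *    brw_WHILE(p);
 *
 * On gen6+ (or in single program flow mode) brw_DO() emits no instruction
 * and merely records the loop start for brw_WHILE() to jump back to.
 */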
1717
1718 /**
1719 * For pre-gen6, we patch BREAK/CONT instructions to point at the WHILE
1720 * instruction here.
1721 *
1722 * For gen6+, see brw_set_uip_jip(), which doesn't care so much about the loop
1723 * nesting, since it can always just point to the end of the block/current loop.
1724 */
1725 static void
1726 brw_patch_break_cont(struct brw_compile *p, brw_inst *while_inst)
1727 {
1728 struct brw_context *brw = p->brw;
1729 brw_inst *do_inst = get_inner_do_insn(p);
1730 brw_inst *inst;
1731 unsigned br = brw_jump_scale(brw);
1732
1733 assert(brw->gen < 6);
1734
1735 for (inst = while_inst - 1; inst != do_inst; inst--) {
1736 /* If the jump count is nonzero, this instruction has already been
1737 * patched, because it is part of a loop nested inside the one we're
1738 * patching.
1739 */
1740 if (brw_inst_opcode(brw, inst) == BRW_OPCODE_BREAK &&
1741 brw_inst_gen4_jump_count(brw, inst) == 0) {
1742 brw_inst_set_gen4_jump_count(brw, inst, br*((while_inst - inst) + 1));
1743 } else if (brw_inst_opcode(brw, inst) == BRW_OPCODE_CONTINUE &&
1744 brw_inst_gen4_jump_count(brw, inst) == 0) {
1745 brw_inst_set_gen4_jump_count(brw, inst, br * (while_inst - inst));
1746 }
1747 }
1748 }
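
/* Worked example (assumed layout): with br == 1 (gen4) and a BREAK sitting
 * three instructions before the WHILE, (while_inst - inst) == 3, so the
 * BREAK gets a jump count of 1 * (3 + 1) == 4, landing one instruction past
 * the WHILE, while a CONTINUE at the same spot would get 1 * 3 and target
 * the WHILE itself.
 */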
1749
1750 brw_inst *
1751 brw_WHILE(struct brw_compile *p)
1752 {
1753 struct brw_context *brw = p->brw;
1754 brw_inst *insn, *do_insn;
1755 unsigned br = brw_jump_scale(brw);
1756
1757 if (brw->gen >= 6) {
1758 insn = next_insn(p, BRW_OPCODE_WHILE);
1759 do_insn = get_inner_do_insn(p);
1760
1761 if (brw->gen >= 8) {
1762 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1763 brw_set_src0(p, insn, brw_imm_d(0));
1764 brw_inst_set_jip(brw, insn, br * (do_insn - insn));
1765 } else if (brw->gen == 7) {
1766 brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1767 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1768 brw_set_src1(p, insn, brw_imm_ud(0));
1769 brw_inst_set_jip(brw, insn, br * (do_insn - insn));
1770 } else {
1771 brw_set_dest(p, insn, brw_imm_w(0));
1772 brw_inst_set_gen6_jump_count(brw, insn, br * (do_insn - insn));
1773 brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1774 brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
1775 }
1776
1777 brw_inst_set_exec_size(brw, insn, p->compressed ? BRW_EXECUTE_16
1778 : BRW_EXECUTE_8);
1779 } else {
1780 if (p->single_program_flow) {
1781 insn = next_insn(p, BRW_OPCODE_ADD);
1782 do_insn = get_inner_do_insn(p);
1783
1784 brw_set_dest(p, insn, brw_ip_reg());
1785 brw_set_src0(p, insn, brw_ip_reg());
1786 brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
1787 brw_inst_set_exec_size(brw, insn, BRW_EXECUTE_1);
1788 } else {
1789 insn = next_insn(p, BRW_OPCODE_WHILE);
1790 do_insn = get_inner_do_insn(p);
1791
1792 assert(brw_inst_opcode(brw, do_insn) == BRW_OPCODE_DO);
1793
1794 brw_set_dest(p, insn, brw_ip_reg());
1795 brw_set_src0(p, insn, brw_ip_reg());
1796 brw_set_src1(p, insn, brw_imm_d(0));
1797
1798 brw_inst_set_exec_size(brw, insn, brw_inst_exec_size(brw, do_insn));
1799 brw_inst_set_gen4_jump_count(brw, insn, br * (do_insn - insn + 1));
1800 brw_inst_set_gen4_pop_count(brw, insn, 0);
1801
1802 brw_patch_break_cont(p, insn);
1803 }
1804 }
1805 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
1806
1807 p->loop_stack_depth--;
1808
1809 return insn;
1810 }
1811
1812 /* FORWARD JUMPS:
1813 */
1814 void brw_land_fwd_jump(struct brw_compile *p, int jmp_insn_idx)
1815 {
1816 struct brw_context *brw = p->brw;
1817 brw_inst *jmp_insn = &p->store[jmp_insn_idx];
1818 unsigned jmpi = 1;
1819
1820 if (brw->gen >= 5)
1821 jmpi = 2;
1822
1823 assert(brw_inst_opcode(brw, jmp_insn) == BRW_OPCODE_JMPI);
1824 assert(brw_inst_src1_reg_file(brw, jmp_insn) == BRW_IMMEDIATE_VALUE);
1825
1826 brw_inst_set_gen4_jump_count(brw, jmp_insn,
1827 jmpi * (p->nr_insn - jmp_insn_idx - 1));
1828 }
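
/* Worked example (assumed values): on gen5 jmpi == 2, so a JMPI at index 10
 * with p->nr_insn == 14 gets a jump count of 2 * (14 - 10 - 1) == 6, which
 * skips the three instructions that follow it and lands at the current end
 * of the program.
 */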
1829
1830 /* To integrate with the above, it makes sense that the comparison
1831 * instruction should populate the flag register. It might be simpler
1832 * just to use the flag reg for most WM tasks?
1833 */
1834 void brw_CMP(struct brw_compile *p,
1835 struct brw_reg dest,
1836 unsigned conditional,
1837 struct brw_reg src0,
1838 struct brw_reg src1)
1839 {
1840 struct brw_context *brw = p->brw;
1841 brw_inst *insn = next_insn(p, BRW_OPCODE_CMP);
1842
1843 if (brw->gen >= 8) {
1844 /* The CMP instruction appears to behave erratically for floating point
1845 * sources unless the destination type is also float. Overriding it to
1846 * match src0 makes it work in all cases.
1847 */
1848 dest.type = src0.type;
1849 }
1850
1851 brw_inst_set_cond_modifier(brw, insn, conditional);
1852 brw_set_dest(p, insn, dest);
1853 brw_set_src0(p, insn, src0);
1854 brw_set_src1(p, insn, src1);
1855
1856 /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
1857 * page says:
1858 * "Any CMP instruction with a null destination must use a {switch}."
1859 *
1860 * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
1861 * mentioned on their work-arounds pages.
1862 */
1863 if (brw->gen == 7) {
1864 if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1865 dest.nr == BRW_ARF_NULL) {
1866 brw_inst_set_thread_control(brw, insn, BRW_THREAD_SWITCH);
1867 }
1868 }
1869 }
1870
1871 /***********************************************************************
1872 * Helpers for the various SEND message types:
1873 */
1874
1875 /** Extended math function, float[8].
1876 */
1877 void gen4_math(struct brw_compile *p,
1878 struct brw_reg dest,
1879 unsigned function,
1880 unsigned msg_reg_nr,
1881 struct brw_reg src,
1882 unsigned precision )
1883 {
1884 struct brw_context *brw = p->brw;
1885 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
1886 unsigned data_type;
1887 if (src.vstride == BRW_VERTICAL_STRIDE_0 &&
1888 src.width == BRW_WIDTH_1 &&
1889 src.hstride == BRW_HORIZONTAL_STRIDE_0) {
1890 data_type = BRW_MATH_DATA_SCALAR;
1891 } else {
1892 data_type = BRW_MATH_DATA_VECTOR;
1893 }
1894
1895 assert(brw->gen < 6);
1896
1897 /* Example code doesn't set predicate_control for send
1898 * instructions.
1899 */
1900 brw_inst_set_pred_control(brw, insn, 0);
1901 brw_inst_set_base_mrf(brw, insn, msg_reg_nr);
1902
1903 brw_set_dest(p, insn, dest);
1904 brw_set_src0(p, insn, src);
1905 brw_set_math_message(p,
1906 insn,
1907 function,
1908 src.type == BRW_REGISTER_TYPE_D,
1909 precision,
1910 data_type);
1911 }
1912
1913 void gen6_math(struct brw_compile *p,
1914 struct brw_reg dest,
1915 unsigned function,
1916 struct brw_reg src0,
1917 struct brw_reg src1)
1918 {
1919 struct brw_context *brw = p->brw;
1920 brw_inst *insn = next_insn(p, BRW_OPCODE_MATH);
1921
1922 assert(brw->gen >= 6);
1923
1924 assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
1925 (brw->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
1926 assert(src0.file == BRW_GENERAL_REGISTER_FILE ||
1927 (brw->gen >= 8 && src0.file == BRW_IMMEDIATE_VALUE));
1928
1929 assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
1930 if (brw->gen == 6) {
1931 assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
1932 assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
1933 }
1934
1935 if (function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT ||
1936 function == BRW_MATH_FUNCTION_INT_DIV_REMAINDER ||
1937 function == BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
1938 assert(src0.type != BRW_REGISTER_TYPE_F);
1939 assert(src1.type != BRW_REGISTER_TYPE_F);
1940 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1941 (brw->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1942 } else {
1943 assert(src0.type == BRW_REGISTER_TYPE_F);
1944 assert(src1.type == BRW_REGISTER_TYPE_F);
1945 if (function == BRW_MATH_FUNCTION_POW) {
1946 assert(src1.file == BRW_GENERAL_REGISTER_FILE ||
1947 (brw->gen >= 8 && src1.file == BRW_IMMEDIATE_VALUE));
1948 } else {
1949 assert(src1.file == BRW_ARCHITECTURE_REGISTER_FILE &&
1950 src1.nr == BRW_ARF_NULL);
1951 }
1952 }
1953
1954 /* Source modifiers are ignored for extended math instructions on Gen6. */
1955 if (brw->gen == 6) {
1956 assert(!src0.negate);
1957 assert(!src0.abs);
1958 assert(!src1.negate);
1959 assert(!src1.abs);
1960 }
1961
1962 brw_inst_set_math_function(brw, insn, function);
1963
1964 brw_set_dest(p, insn, dest);
1965 brw_set_src0(p, insn, src0);
1966 brw_set_src1(p, insn, src1);
1967 }
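
/* A minimal call sketch for the gen6+ math helper above (hypothetical
 * register numbers; not actual driver code):
 *
 *    // dst = 1.0f / src: float operands, src1 must be the null register
 *    gen6_math(p, brw_vec8_grf(10, 0), BRW_MATH_FUNCTION_INV,
 *              brw_vec8_grf(4, 0), brw_null_reg());
 */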
1968
1969
1970 /**
1971 * Write a block of OWORDs (half a GRF each) from the scratch buffer,
1972 * using a constant offset per channel.
1973 *
1974 * The offset must be aligned to oword size (16 bytes). Used for
1975 * register spilling.
1976 */
1977 void brw_oword_block_write_scratch(struct brw_compile *p,
1978 struct brw_reg mrf,
1979 int num_regs,
1980 unsigned offset)
1981 {
1982 struct brw_context *brw = p->brw;
1983 uint32_t msg_control, msg_type;
1984 int mlen;
1985
1986 if (brw->gen >= 6)
1987 offset /= 16;
1988
1989 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
1990
1991 if (num_regs == 1) {
1992 msg_control = BRW_DATAPORT_OWORD_BLOCK_2_OWORDS;
1993 mlen = 2;
1994 } else {
1995 msg_control = BRW_DATAPORT_OWORD_BLOCK_4_OWORDS;
1996 mlen = 3;
1997 }
1998
1999 /* Set up the message header. This is g0, with g0.2 filled with
2000 * the offset. We don't want to leave our offset around in g0 or
2001 * it'll screw up texture samples, so set it up inside the message
2002 * reg.
2003 */
2004 {
2005 brw_push_insn_state(p);
2006 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2007 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2008
2009 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2010
2011 /* set message header global offset field (reg 0, element 2) */
2012 brw_MOV(p,
2013 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2014 mrf.nr,
2015 2), BRW_REGISTER_TYPE_UD),
2016 brw_imm_ud(offset));
2017
2018 brw_pop_insn_state(p);
2019 }
2020
2021 {
2022 struct brw_reg dest;
2023 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2024 int send_commit_msg;
2025 struct brw_reg src_header = retype(brw_vec8_grf(0, 0),
2026 BRW_REGISTER_TYPE_UW);
2027
2028 if (brw_inst_qtr_control(brw, insn) != BRW_COMPRESSION_NONE) {
2029 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
2030 src_header = vec16(src_header);
2031 }
2032 assert(brw_inst_pred_control(brw, insn) == BRW_PREDICATE_NONE);
2033 if (brw->gen < 6)
2034 brw_inst_set_base_mrf(brw, insn, mrf.nr);
2035
2036 /* Until gen6, writes followed by reads from the same location
2037 * are not guaranteed to be ordered unless write_commit is set.
2038 * If set, then a no-op write is issued to the destination
2039 * register to set a dependency, and a read from the destination
2040 * can be used to ensure the ordering.
2041 *
2042 * For gen6, only writes between different threads need ordering
2043 * protection. Our use of DP writes is all about register
2044 * spilling within a thread.
2045 */
2046 if (brw->gen >= 6) {
2047 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2048 send_commit_msg = 0;
2049 } else {
2050 dest = src_header;
2051 send_commit_msg = 1;
2052 }
2053
2054 brw_set_dest(p, insn, dest);
2055 if (brw->gen >= 6) {
2056 brw_set_src0(p, insn, mrf);
2057 } else {
2058 brw_set_src0(p, insn, brw_null_reg());
2059 }
2060
2061 if (brw->gen >= 6)
2062 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2063 else
2064 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
2065
2066 brw_set_dp_write_message(p,
2067 insn,
2068 255, /* binding table index (255=stateless) */
2069 msg_control,
2070 msg_type,
2071 mlen,
2072 true, /* header_present */
2073 0, /* not a render target */
2074 send_commit_msg, /* response_length */
2075 0, /* eot */
2076 send_commit_msg);
2077 }
2078 }
2079
2080
2081 /**
2082 * Read a block of owords (half a GRF each) from the scratch buffer
2083 * using a constant index per channel.
2084 *
2085 * Offset must be aligned to oword size (16 bytes). Used for register
2086 * spilling.
2087 */
2088 void
2089 brw_oword_block_read_scratch(struct brw_compile *p,
2090 struct brw_reg dest,
2091 struct brw_reg mrf,
2092 int num_regs,
2093 unsigned offset)
2094 {
2095 struct brw_context *brw = p->brw;
2096 uint32_t msg_control;
2097 int rlen;
2098
2099 if (brw->gen >= 6)
2100 offset /= 16;
2101
2102 if (p->brw->gen >= 7) {
2103 /* On gen 7 and above, we no longer have message registers and we can
2104 * send from any register we want. By using the destination register
2105 * for the message, we guarantee that the implied message write won't
2106 * accidentally overwrite anything. This has been a problem because
2107 * the MRF registers and source for the final FB write are both fixed
2108 * and may overlap.
2109 */
2110 mrf = retype(dest, BRW_REGISTER_TYPE_UD);
2111 } else {
2112 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2113 }
2114 dest = retype(dest, BRW_REGISTER_TYPE_UW);
2115
2116 if (num_regs == 1) {
2117 msg_control = BRW_DATAPORT_OWORD_BLOCK_2_OWORDS;
2118 rlen = 1;
2119 } else {
2120 msg_control = BRW_DATAPORT_OWORD_BLOCK_4_OWORDS;
2121 rlen = 2;
2122 }
2123
2124 {
2125 brw_push_insn_state(p);
2126 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2127 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2128
2129 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2130
2131 /* set message header global offset field (reg 0, element 2) */
2132 brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
2133
2134 brw_pop_insn_state(p);
2135 }
2136
2137 {
2138 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2139
2140 assert(brw_inst_pred_control(brw, insn) == 0);
2141 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
2142
2143 brw_set_dest(p, insn, dest); /* UW? */
2144 if (brw->gen >= 6) {
2145 brw_set_src0(p, insn, mrf);
2146 } else {
2147 brw_set_src0(p, insn, brw_null_reg());
2148 brw_inst_set_base_mrf(brw, insn, mrf.nr);
2149 }
2150
2151 brw_set_dp_read_message(p,
2152 insn,
2153 255, /* binding table index (255=stateless) */
2154 msg_control,
2155 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
2156 BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
2157 1, /* msg_length */
2158 true, /* header_present */
2159 rlen);
2160 }
2161 }
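
/* A hedged spill/fill sketch using the two scratch helpers above
 * (hypothetical register choices; not actual driver code):
 *
 *    // spill g12: stage the data in m2, let the helper build the header in m1
 *    brw_MOV(p, retype(brw_message_reg(2), BRW_REGISTER_TYPE_UD),
 *            retype(brw_vec8_grf(12, 0), BRW_REGISTER_TYPE_UD));
 *    brw_oword_block_write_scratch(p, brw_message_reg(1), 1, 0x40);
 *
 *    // later, fill the value back into g12 from the same offset
 *    brw_oword_block_read_scratch(p, brw_vec8_grf(12, 0),
 *                                 brw_message_reg(1), 1, 0x40);
 */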
2162
2163 void
2164 gen7_block_read_scratch(struct brw_compile *p,
2165 struct brw_reg dest,
2166 int num_regs,
2167 unsigned offset)
2168 {
2169 const struct brw_context *brw = p->brw;
2170 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2171 assert(brw_inst_pred_control(brw, insn) == BRW_PREDICATE_NONE);
2172
2173 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
2174 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UW));
2175
2176 /* The HW requires that the header is present; this is to get the g0.5
2177 * scratch offset.
2178 */
2179 brw_set_src0(p, insn, brw_vec8_grf(0, 0));
2180
2181 /* According to the docs, offset is "A 12-bit HWord offset into the memory
2182 * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
2183 * is 32 bytes, which happens to be the size of a register.
2184 */
2185 offset /= REG_SIZE;
2186 assert(offset < (1 << 12));
2187
2188 gen7_set_dp_scratch_message(p, insn,
2189 false, /* scratch read */
2190 false, /* OWords */
2191 false, /* invalidate after read */
2192 num_regs,
2193 offset,
2194 1, /* mlen: just g0 */
2195 num_regs, /* rlen */
2196 true); /* header present */
2197 }
2198
2199 /**
2200 * Read a float[4] vector from the data port Data Cache (const buffer).
2201 * Location (in buffer) should be a multiple of 16.
2202 * Used for fetching shader constants.
2203 */
2204 void brw_oword_block_read(struct brw_compile *p,
2205 struct brw_reg dest,
2206 struct brw_reg mrf,
2207 uint32_t offset,
2208 uint32_t bind_table_index)
2209 {
2210 struct brw_context *brw = p->brw;
2211
2212 /* On gen6 and newer hardware, the offset is in units of owords (16 bytes). */
2213 if (brw->gen >= 6)
2214 offset /= 16;
2215
2216 mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
2217
2218 brw_push_insn_state(p);
2219 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
2220 brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
2221 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2222
2223 brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
2224
2225 /* set message header global offset field (reg 0, element 2) */
2226 brw_MOV(p,
2227 retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
2228 mrf.nr,
2229 2), BRW_REGISTER_TYPE_UD),
2230 brw_imm_ud(offset));
2231
2232 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2233
2234 /* cast dest to a uword[8] vector */
2235 dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
2236
2237 brw_set_dest(p, insn, dest);
2238 if (brw->gen >= 6) {
2239 brw_set_src0(p, insn, mrf);
2240 } else {
2241 brw_set_src0(p, insn, brw_null_reg());
2242 brw_inst_set_base_mrf(brw, insn, mrf.nr);
2243 }
2244
2245 brw_set_dp_read_message(p,
2246 insn,
2247 bind_table_index,
2248 BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW,
2249 BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
2250 BRW_DATAPORT_READ_TARGET_DATA_CACHE,
2251 1, /* msg_length */
2252 true, /* header_present */
2253 1); /* response_length (1 reg, 2 owords!) */
2254
2255 brw_pop_insn_state(p);
2256 }
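
/* A minimal sketch of fetching one float[4] constant with the helper above
 * (SURF_INDEX_CONST is an assumed binding table slot; not actual driver
 * code):
 *
 *    brw_oword_block_read(p, brw_vec4_grf(4, 0), brw_message_reg(1),
 *                         16,                // byte offset, 16-byte aligned
 *                         SURF_INDEX_CONST); // assumed binding table index
 */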
2257
2258
2259 void brw_fb_WRITE(struct brw_compile *p,
2260 int dispatch_width,
2261 struct brw_reg payload,
2262 struct brw_reg implied_header,
2263 unsigned msg_control,
2264 unsigned binding_table_index,
2265 unsigned msg_length,
2266 unsigned response_length,
2267 bool eot,
2268 bool header_present)
2269 {
2270 struct brw_context *brw = p->brw;
2271 brw_inst *insn;
2272 unsigned msg_type;
2273 struct brw_reg dest, src0;
2274
2275 if (dispatch_width == 16)
2276 dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2277 else
2278 dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
2279
2280 if (brw->gen >= 6) {
2281 insn = next_insn(p, BRW_OPCODE_SENDC);
2282 } else {
2283 insn = next_insn(p, BRW_OPCODE_SEND);
2284 }
2285 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
2286
2287 if (brw->gen >= 6) {
2288 /* headerless version, just submit color payload */
2289 src0 = payload;
2290
2291 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2292 } else {
2293 assert(payload.file == BRW_MESSAGE_REGISTER_FILE);
2294 brw_inst_set_base_mrf(brw, insn, payload.nr);
2295 src0 = implied_header;
2296
2297 msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
2298 }
2299
2300 brw_set_dest(p, insn, dest);
2301 brw_set_src0(p, insn, src0);
2302 brw_set_dp_write_message(p,
2303 insn,
2304 binding_table_index,
2305 msg_control,
2306 msg_type,
2307 msg_length,
2308 header_present,
2309 eot, /* last render target write */
2310 response_length,
2311 eot,
2312 0 /* send_commit_msg */);
2313 }
2314
2315
2316 /**
2317 * Texture sample instruction.
2318 * Note: the msg_type plus msg_length values determine exactly what kind
2319 * of sampling operation is performed. See volume 4, page 161 of docs.
2320 */
2321 void brw_SAMPLE(struct brw_compile *p,
2322 struct brw_reg dest,
2323 unsigned msg_reg_nr,
2324 struct brw_reg src0,
2325 unsigned binding_table_index,
2326 unsigned sampler,
2327 unsigned msg_type,
2328 unsigned response_length,
2329 unsigned msg_length,
2330 unsigned header_present,
2331 unsigned simd_mode,
2332 unsigned return_format)
2333 {
2334 struct brw_context *brw = p->brw;
2335 brw_inst *insn;
2336
2337 if (msg_reg_nr != -1)
2338 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2339
2340 insn = next_insn(p, BRW_OPCODE_SEND);
2341 brw_inst_set_pred_control(brw, insn, BRW_PREDICATE_NONE); /* XXX */
2342
2343 /* From the 965 PRM (volume 4, part 1, section 14.2.41):
2344 *
2345 * "Instruction compression is not allowed for this instruction (that
2346 * is, send). The hardware behavior is undefined if this instruction is
2347 * set as compressed. However, compress control can be set to "SecHalf"
2348 * to affect the EMask generation."
2349 *
2350 * No similar wording is found in later PRMs, but there are examples
2351 * utilizing send with SecHalf. More importantly, SIMD8 sampler messages
2352 * are allowed in SIMD16 mode and they could not work without SecHalf. For
2353 * these reasons, we allow BRW_COMPRESSION_2NDHALF here.
2354 */
2355 if (brw_inst_qtr_control(brw, insn) != BRW_COMPRESSION_2NDHALF)
2356 brw_inst_set_qtr_control(brw, insn, BRW_COMPRESSION_NONE);
2357
2358 if (brw->gen < 6)
2359 brw_inst_set_base_mrf(brw, insn, msg_reg_nr);
2360
2361 brw_set_dest(p, insn, dest);
2362 brw_set_src0(p, insn, src0);
2363 brw_set_sampler_message(p, insn,
2364 binding_table_index,
2365 sampler,
2366 msg_type,
2367 response_length,
2368 msg_length,
2369 header_present,
2370 simd_mode,
2371 return_format);
2372 }
2373
2374 /* Adjust the message header's sampler state pointer to
2375 * select the correct group of 16 samplers.
2376 */
2377 void brw_adjust_sampler_state_pointer(struct brw_compile *p,
2378 struct brw_reg header,
2379 struct brw_reg sampler_index,
2380 struct brw_reg scratch)
2381 {
2382 /* The "Sampler Index" field can only store values between 0 and 15.
2383 * However, we can add an offset to the "Sampler State Pointer"
2384 * field, effectively selecting a different set of 16 samplers.
2385 *
2386 * The "Sampler State Pointer" needs to be aligned to a 32-byte
2387 * offset, and each sampler state is only 16 bytes, so we can't
2388 * exclusively use the offset - we have to use both.
2389 */
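
/* Worked example (assumed index): for sampler 20, the immediate path below
 * adds 16 * (20 / 16) * 16 == 256 bytes to the pointer, selecting the
 * second group of 16 sampler states; the non-immediate path computes the
 * same value as (20 & 0xf0) << 4 == 256.
 */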
2390
2391 struct brw_context *brw = p->brw;
2392
2393 if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
2394 const int sampler_state_size = 16; /* 16 bytes */
2395 uint32_t sampler = sampler_index.dw1.ud;
2396
2397 if (sampler >= 16) {
2398 assert(brw->is_haswell || brw->gen >= 8);
2399 brw_ADD(p,
2400 get_element_ud(header, 3),
2401 get_element_ud(brw_vec8_grf(0, 0), 3),
2402 brw_imm_ud(16 * (sampler / 16) * sampler_state_size));
2403 }
2404 } else {
2405 /* Non-const sampler array indexing case */
2406 if (brw->gen < 8 && !brw->is_haswell) {
2407 return;
2408 }
2409
2410 struct brw_reg temp = vec1(retype(scratch, BRW_REGISTER_TYPE_UD));
2411
2412 brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
2413 brw_SHL(p, temp, temp, brw_imm_ud(4));
2414 brw_ADD(p,
2415 get_element_ud(header, 3),
2416 get_element_ud(brw_vec8_grf(0, 0), 3),
2417 temp);
2418 }
2419 }
2420
2421 /* All these variables are pretty confusing - we might be better off
2422 * using bitmasks and macros for this, in the old style. Or perhaps
2423 * just having the caller instantiate the fields in dword3 itself.
2424 */
2425 void brw_urb_WRITE(struct brw_compile *p,
2426 struct brw_reg dest,
2427 unsigned msg_reg_nr,
2428 struct brw_reg src0,
2429 enum brw_urb_write_flags flags,
2430 unsigned msg_length,
2431 unsigned response_length,
2432 unsigned offset,
2433 unsigned swizzle)
2434 {
2435 struct brw_context *brw = p->brw;
2436 brw_inst *insn;
2437
2438 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2439
2440 if (brw->gen >= 7 && !(flags & BRW_URB_WRITE_USE_CHANNEL_MASKS)) {
2441 /* Enable Channel Masks in the URB_WRITE_HWORD message header */
2442 brw_push_insn_state(p);
2443 brw_set_default_access_mode(p, BRW_ALIGN_1);
2444 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2445 brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
2446 BRW_REGISTER_TYPE_UD),
2447 retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
2448 brw_imm_ud(0xff00));
2449 brw_pop_insn_state(p);
2450 }
2451
2452 insn = next_insn(p, BRW_OPCODE_SEND);
2453
2454 assert(msg_length < BRW_MAX_MRF);
2455
2456 brw_set_dest(p, insn, dest);
2457 brw_set_src0(p, insn, src0);
2458 brw_set_src1(p, insn, brw_imm_d(0));
2459
2460 if (brw->gen < 6)
2461 brw_inst_set_base_mrf(brw, insn, msg_reg_nr);
2462
2463 brw_set_urb_message(p,
2464 insn,
2465 flags,
2466 msg_length,
2467 response_length,
2468 offset,
2469 swizzle);
2470 }
2471
2472 static int
2473 brw_find_next_block_end(struct brw_compile *p, int start_offset)
2474 {
2475 int offset;
2476 void *store = p->store;
2477 const struct brw_context *brw = p->brw;
2478
2479 for (offset = next_offset(brw, store, start_offset);
2480 offset < p->next_insn_offset;
2481 offset = next_offset(brw, store, offset)) {
2482 brw_inst *insn = store + offset;
2483
2484 switch (brw_inst_opcode(brw, insn)) {
2485 case BRW_OPCODE_ENDIF:
2486 case BRW_OPCODE_ELSE:
2487 case BRW_OPCODE_WHILE:
2488 case BRW_OPCODE_HALT:
2489 return offset;
default:
break;
2490 }
2491 }
2492
2493 return 0;
2494 }
2495
2496 /* There is no DO instruction on gen6 and newer, so to find the end of
2497 * the loop we have to see if the loop is jumping back before our start
2498 * instruction.
2499 */
2500 static int
2501 brw_find_loop_end(struct brw_compile *p, int start_offset)
2502 {
2503 struct brw_context *brw = p->brw;
2504 int offset;
2505 int scale = 16 / brw_jump_scale(brw);
2506 void *store = p->store;
2507
2508 assert(brw->gen >= 6);
2509
2510 /* Always start after the instruction (such as a WHILE) we're trying to fix
2511 * up.
2512 */
2513 for (offset = next_offset(brw, store, start_offset);
2514 offset < p->next_insn_offset;
2515 offset = next_offset(brw, store, offset)) {
2516 brw_inst *insn = store + offset;
2517
2518 if (brw_inst_opcode(brw, insn) == BRW_OPCODE_WHILE) {
2519 int jip = brw->gen == 6 ? brw_inst_gen6_jump_count(brw, insn)
2520 : brw_inst_jip(brw, insn);
2521 if (offset + jip * scale <= start_offset)
2522 return offset;
2523 }
2524 }
2525 assert(!"not reached");
2526 return start_offset;
2527 }
2528
2529 /* After program generation, go back and update the UIP and JIP of
2530 * BREAK, CONT, and HALT instructions to their correct locations.
2531 */
2532 void
2533 brw_set_uip_jip(struct brw_compile *p)
2534 {
2535 struct brw_context *brw = p->brw;
2536 int offset;
2537 int br = brw_jump_scale(brw);
2538 int scale = 16 / br;
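/* Instructions are 16 bytes each, so dividing a byte offset by scale
 * (== 16 / br) converts it into jump units; e.g. assuming br == 2 (the
 * gen6/7 value), scale == 8 and a 48-byte forward branch becomes a JIP
 * of 6.
 */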
2539 void *store = p->store;
2540
2541 if (brw->gen < 6)
2542 return;
2543
2544 for (offset = 0; offset < p->next_insn_offset;
2545 offset = next_offset(brw, store, offset)) {
2546 brw_inst *insn = store + offset;
2547
2548 if (brw_inst_cmpt_control(brw, insn)) {
2549 /* Fixups for compacted BREAK/CONTINUE not supported yet. */
2550 assert(brw_inst_opcode(brw, insn) != BRW_OPCODE_BREAK &&
2551 brw_inst_opcode(brw, insn) != BRW_OPCODE_CONTINUE &&
2552 brw_inst_opcode(brw, insn) != BRW_OPCODE_HALT);
2553 continue;
2554 }
2555
2556 int block_end_offset = brw_find_next_block_end(p, offset);
2557 switch (brw_inst_opcode(brw, insn)) {
2558 case BRW_OPCODE_BREAK:
2559 assert(block_end_offset != 0);
2560 brw_inst_set_jip(brw, insn, (block_end_offset - offset) / scale);
2561 /* Gen7 UIP points to WHILE; Gen6 points just after it */
2562 brw_inst_set_uip(brw, insn,
2563 (brw_find_loop_end(p, offset) - offset +
2564 (brw->gen == 6 ? 16 : 0)) / scale);
2565 break;
2566 case BRW_OPCODE_CONTINUE:
2567 assert(block_end_offset != 0);
2568 brw_inst_set_jip(brw, insn, (block_end_offset - offset) / scale);
2569 brw_inst_set_uip(brw, insn,
2570 (brw_find_loop_end(p, offset) - offset) / scale);
2571
2572 assert(brw_inst_uip(brw, insn) != 0);
2573 assert(brw_inst_jip(brw, insn) != 0);
2574 break;
2575
2576 case BRW_OPCODE_ENDIF: {
2577 int32_t jump = (block_end_offset == 0) ?
2578 1 * br : (block_end_offset - offset) / scale;
2579 if (brw->gen >= 7)
2580 brw_inst_set_jip(brw, insn, jump);
2581 else
2582 brw_inst_set_gen6_jump_count(brw, insn, jump);
2583 break;
2584 }
2585
2586 case BRW_OPCODE_HALT:
2587 /* From the Sandy Bridge PRM (volume 4, part 2, section 8.3.19):
2588 *
2589 * "In case of the halt instruction not inside any conditional
2590 * code block, the value of <JIP> and <UIP> should be the
2591 * same. In case of the halt instruction inside conditional code
2592 * block, the <UIP> should be the end of the program, and the
2593 * <JIP> should be end of the most inner conditional code block."
2594 *
2595 * The uip will have already been set by whoever set up the
2596 * instruction.
2597 */
2598 if (block_end_offset == 0) {
2599 brw_inst_set_jip(brw, insn, brw_inst_uip(brw, insn));
2600 } else {
2601 brw_inst_set_jip(brw, insn, (block_end_offset - offset) / scale);
2602 }
2603 assert(brw_inst_uip(brw, insn) != 0);
2604 assert(brw_inst_jip(brw, insn) != 0);
2605 break;
2606 }
2607 }
2608 }
2609
2610 void brw_ff_sync(struct brw_compile *p,
2611 struct brw_reg dest,
2612 unsigned msg_reg_nr,
2613 struct brw_reg src0,
2614 bool allocate,
2615 unsigned response_length,
2616 bool eot)
2617 {
2618 struct brw_context *brw = p->brw;
2619 brw_inst *insn;
2620
2621 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2622
2623 insn = next_insn(p, BRW_OPCODE_SEND);
2624 brw_set_dest(p, insn, dest);
2625 brw_set_src0(p, insn, src0);
2626 brw_set_src1(p, insn, brw_imm_d(0));
2627
2628 if (brw->gen < 6)
2629 brw_inst_set_base_mrf(brw, insn, msg_reg_nr);
2630
2631 brw_set_ff_sync_message(p,
2632 insn,
2633 allocate,
2634 response_length,
2635 eot);
2636 }
2637
2638 /**
2639 * Emit the SEND instruction necessary to generate stream output data on Gen6
2640 * (for transform feedback).
2641 *
2642 * If send_commit_msg is true, this is the last piece of stream output data
2643 * from this thread, so send the data as a committed write. According to the
2644 * Sandy Bridge PRM (volume 2 part 1, section 4.5.1):
2645 *
2646 * "Prior to End of Thread with a URB_WRITE, the kernel must ensure all
2647 * writes are complete by sending the final write as a committed write."
2648 */
2649 void
2650 brw_svb_write(struct brw_compile *p,
2651 struct brw_reg dest,
2652 unsigned msg_reg_nr,
2653 struct brw_reg src0,
2654 unsigned binding_table_index,
2655 bool send_commit_msg)
2656 {
2657 brw_inst *insn;
2658
2659 gen6_resolve_implied_move(p, &src0, msg_reg_nr);
2660
2661 insn = next_insn(p, BRW_OPCODE_SEND);
2662 brw_set_dest(p, insn, dest);
2663 brw_set_src0(p, insn, src0);
2664 brw_set_src1(p, insn, brw_imm_d(0));
2665 brw_set_dp_write_message(p, insn,
2666 binding_table_index,
2667 0, /* msg_control: ignored */
2668 GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE,
2669 1, /* msg_length */
2670 true, /* header_present */
2671 0, /* last_render_target: ignored */
2672 send_commit_msg, /* response_length */
2673 0, /* end_of_thread */
2674 send_commit_msg); /* send_commit_msg */
2675 }
2676
2677 static void
2678 brw_set_dp_untyped_atomic_message(struct brw_compile *p,
2679 brw_inst *insn,
2680 unsigned atomic_op,
2681 unsigned bind_table_index,
2682 unsigned msg_length,
2683 unsigned response_length,
2684 bool header_present)
2685 {
2686 const struct brw_context *brw = p->brw;
2687
2688 unsigned msg_control =
2689 atomic_op | /* Atomic Operation Type: BRW_AOP_* */
2690 (response_length ? 1 << 5 : 0); /* Return data expected */
2691
2692 if (brw->gen >= 8 || brw->is_haswell) {
2693 brw_set_message_descriptor(p, insn, HSW_SFID_DATAPORT_DATA_CACHE_1,
2694 msg_length, response_length,
2695 header_present, false);
2696
2698 if (brw_inst_access_mode(brw, insn) == BRW_ALIGN_1) {
2699 if (brw_inst_exec_size(brw, insn) != BRW_EXECUTE_16)
2700 msg_control |= 1 << 4; /* SIMD8 mode */
2701
2702 brw_inst_set_dp_msg_type(brw, insn,
2703 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP);
2704 } else {
2705 brw_inst_set_dp_msg_type(brw, insn,
2706 HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2);
2707 }
2708 } else {
2709 brw_set_message_descriptor(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
2710 msg_length, response_length,
2711 header_present, false);
2712
2713 brw_inst_set_dp_msg_type(brw, insn, GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP);
2714
2715 if (brw_inst_exec_size(brw, insn) != BRW_EXECUTE_16)
2716 msg_control |= 1 << 4; /* SIMD8 mode */
2717 }
2718
2719 brw_inst_set_binding_table_index(brw, insn, bind_table_index);
2720 brw_inst_set_dp_msg_control(brw, insn, msg_control);
2721 }
2722
2723 void
2724 brw_untyped_atomic(struct brw_compile *p,
2725 struct brw_reg dest,
2726 struct brw_reg payload,
2727 unsigned atomic_op,
2728 unsigned bind_table_index,
2729 unsigned msg_length,
2730 unsigned response_length)
{
2731 const struct brw_context *brw = p->brw;
2732 brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);
2733
2734 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UD));
2735 brw_set_src0(p, insn, retype(payload, BRW_REGISTER_TYPE_UD));
2736 brw_set_src1(p, insn, brw_imm_d(0));
2737 brw_set_dp_untyped_atomic_message(
2738 p, insn, atomic_op, bind_table_index, msg_length, response_length,
2739 brw_inst_access_mode(brw, insn) == BRW_ALIGN_1);
2740 }
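
/* A hedged usage sketch of the atomic helper above (hypothetical payload
 * register, surface index, and message length; not actual driver code):
 *
 *    // atomic add on surface 0, two-register payload, no return value
 *    brw_untyped_atomic(p, brw_null_reg(), brw_vec8_grf(4, 0),
 *                       BRW_AOP_ADD, 0, 2, 0);
 */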
2741
2742 static void
2743 brw_set_dp_untyped_surface_read_message(struct brw_compile *p,
2744 brw_inst *insn,
2745 unsigned bind_table_index,
2746 unsigned msg_length,
2747 unsigned response_length,
2748 bool header_present)
2749 {
2750 const struct brw_context *brw = p->brw;
2751 const unsigned dispatch_width =
2752 (brw_inst_exec_size(brw, insn) == BRW_EXECUTE_16 ? 16 : 8);
2753 const unsigned num_channels = response_length / (dispatch_width / 8);
2754
2755 if (brw->gen >= 8 || brw->is_haswell) {
2756 brw_set_message_descriptor(p, insn, HSW_SFID_DATAPORT_DATA_CACHE_1,
2757 msg_length, response_length,
2758 header_present, false);
2759
2760 brw_inst_set_dp_msg_type(brw, insn,
2761 HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ);
2762 } else {
2763 brw_set_message_descriptor(p, insn, GEN7_SFID_DATAPORT_DATA_CACHE,
2764 msg_length, response_length,
2765 header_present, false);
2766
2767 brw_inst_set_dp_msg_type(brw, insn,
2768 GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ);
2769 }
2770
2771 /* Set mask of 32-bit channels to drop. */
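/* For example, num_channels == 4 yields 0x0 (keep all of RGBA), while
 * num_channels == 1 yields 0xe (drop channels 1-3, keeping only R).
 */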
2772 unsigned msg_control = (0xf & (0xf << num_channels));
2773
2774 if (brw_inst_access_mode(brw, insn) == BRW_ALIGN_1) {
2775 if (dispatch_width == 16)
2776 msg_control |= 1 << 4; /* SIMD16 mode */
2777 else
2778 msg_control |= 2 << 4; /* SIMD8 mode */
2779 }
2780
2781 brw_inst_set_binding_table_index(brw, insn, bind_table_index);
2782 brw_inst_set_dp_msg_control(brw, insn, msg_control);
2783 }
2784
2785 void
2786 brw_untyped_surface_read(struct brw_compile *p,
2787 struct brw_reg dest,
2788 struct brw_reg mrf,
2789 unsigned bind_table_index,
2790 unsigned msg_length,
2791 unsigned response_length)
2792 {
2793 const struct brw_context *brw = p->brw;
2794 brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2795
2796 brw_set_dest(p, insn, retype(dest, BRW_REGISTER_TYPE_UD));
2797 brw_set_src0(p, insn, retype(mrf, BRW_REGISTER_TYPE_UD));
2798 brw_set_dp_untyped_surface_read_message(
2799 p, insn, bind_table_index, msg_length, response_length,
2800 brw_inst_access_mode(brw, insn) == BRW_ALIGN_1);
2801 }
2802
2803 void
2804 brw_pixel_interpolator_query(struct brw_compile *p,
2805 struct brw_reg dest,
2806 struct brw_reg mrf,
2807 bool noperspective,
2808 unsigned mode,
2809 unsigned data,
2810 unsigned msg_length,
2811 unsigned response_length)
2812 {
2813 const struct brw_context *brw = p->brw;
2814 struct brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
2815
2816 brw_set_dest(p, insn, dest);
2817 brw_set_src0(p, insn, mrf);
2818 brw_set_message_descriptor(p, insn, GEN7_SFID_PIXEL_INTERPOLATOR,
2819 msg_length, response_length,
2820 false /* header is never present for PI */,
2821 false);
2822
2823 brw_inst_set_pi_simd_mode(
2824 brw, insn, brw_inst_exec_size(brw, insn) == BRW_EXECUTE_16);
2825 brw_inst_set_pi_slot_group(brw, insn, 0); /* zero unless 32/64px dispatch */
2826 brw_inst_set_pi_nopersp(brw, insn, noperspective);
2827 brw_inst_set_pi_message_type(brw, insn, mode);
2828 brw_inst_set_pi_message_data(brw, insn, data);
2829 }
2830
2831 /**
2832 * This instruction is generated as a single-channel align1 instruction by
2833 * both the VS and FS stages when using INTEL_DEBUG=shader_time.
2834 *
2835 * We can't use the typed atomic op in the FS because that has the execution
2836 * mask ANDed with the pixel mask, but we just want to write the one dword for
2837 * all the pixels.
2838 *
2839 * We don't use the SIMD4x2 atomic ops in the VS because want to just write
2840 * one u32. So we use the same untyped atomic write message as the pixel
2841 * shader.
2842 *
2843 * The untyped atomic operation requires a BUFFER surface type with RAW
2844 * format, and is only accessible through the legacy DATA_CACHE dataport
2845 * messages.
2846 */
2847 void brw_shader_time_add(struct brw_compile *p,
2848 struct brw_reg payload,
2849 uint32_t surf_index)
2850 {
2851 assert(p->brw->gen >= 7);
2852
2853 brw_push_insn_state(p);
2854 brw_set_default_access_mode(p, BRW_ALIGN_1);
2855 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2856 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
2857 brw_pop_insn_state(p);
2858
2859 /* We use brw_vec1_reg and unmasked because we want to increment the given
2860 * offset only once.
2861 */
2862 brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
2863 BRW_ARF_NULL, 0));
2864 brw_set_src0(p, send, brw_vec1_reg(payload.file,
2865 payload.nr, 0));
2866 brw_set_dp_untyped_atomic_message(p, send, BRW_AOP_ADD, surf_index,
2867 2 /* message length */,
2868 0 /* response length */,
2869 false /* header present */);
2870 }