/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "glsl/glsl_parser_extras.h"
#include "brw_vec4.h"
#include "brw_cfg.h"

using namespace brw;

static void
generate_math1_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src)
{
   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             src,
             BRW_MATH_PRECISION_FULL);
}

static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Gen6 math instructions ignore source modifiers and swizzles, so make
    * sure none have been applied to this operand.
    */
   assert(!src.abs);
   assert(!src.negate);
   assert(src.dw1.bits.swizzle == BRW_SWIZZLE_XYZW);
}

static void
generate_math_gen6(struct brw_codegen *p,
                   vec4_instruction *inst,
                   struct brw_reg dst,
                   struct brw_reg src0,
                   struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   if (src1.file == BRW_GENERAL_REGISTER_FILE)
      check_gen6_math_src_arg(src1);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   gen6_math(p, dst, brw_math_function(inst->opcode), src0, src1);
   brw_set_default_access_mode(p, BRW_ALIGN_16);
}

static void
generate_math2_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
    * "Message Payload":
    *
    *     "Operand0[7].  For the INT DIV functions, this operand is the
    *      denominator."
    *     ...
    *     "Operand1[7].  For the INT DIV functions, this operand is the
    *      numerator."
    */
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;

   brw_push_insn_state(p);
   brw_set_default_saturate(p, false);
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
   brw_pop_insn_state(p);

   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             op0,
             BRW_MATH_PRECISION_FULL);
}

static void
generate_tex(struct brw_codegen *p,
             struct brw_vue_prog_data *prog_data,
             vec4_instruction *inst,
             struct brw_reg dst,
             struct brw_reg src,
             struct brw_reg sampler_index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   int msg_type = -1;

   if (devinfo->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(devinfo->gen >= 8 || devinfo->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (devinfo->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      case SHADER_OPCODE_SAMPLEINFO:
         msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   }

   assert(msg_type != -1);

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);

   /* Load the message header if present.  If there's a texture offset, we
    * need to set it up explicitly and load the offset bitfield.  Otherwise,
    * we can use an implied move from g0 to the first message register.
    */
   if (inst->header_size != 0) {
      if (devinfo->gen < 6 && !inst->offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = brw_vec8_grf(0, 0);
      } else {
         struct brw_reg header =
            retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);
         uint32_t dw2 = 0;

         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         if (inst->offset)
            /* Set the texel offset bits in DWord 2. */
            dw2 = inst->offset;

         if (devinfo->gen >= 9)
            /* SKL+ overloads BRW_SAMPLER_SIMD_MODE_SIMD4X2 to also do SIMD8D,
             * based on bit 22 in the header.
             */
            dw2 |= GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2;

         if (dw2)
            brw_MOV(p, get_element_ud(header, 2), brw_imm_ud(dw2));

         brw_adjust_sampler_state_pointer(p, header, sampler_index);
         brw_pop_insn_state(p);
      }
   }

   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
         inst->opcode == SHADER_OPCODE_TG4_OFFSET)
         ? prog_data->base.binding_table.gather_texture_start
         : prog_data->base.binding_table.texture_start;

   if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t sampler = sampler_index.dw1.ud;

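      /* Samplers above 15 need the message header, where
       * brw_adjust_sampler_state_pointer() above has already pointed the
       * sampler state pointer at the right group of 16 sampler states, so
       * only the index within that group belongs in the descriptor.
       */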
      brw_SAMPLE(p,
                 dst,
                 inst->base_mrf,
                 src,
                 sampler + base_binding_table_index,
                 sampler % 16,
                 msg_type,
                 1, /* response length */
                 inst->mlen,
                 inst->header_size != 0,
                 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                 return_format);

      brw_mark_surface_used(&prog_data->base, sampler + base_binding_table_index);
   } else {
      /* Non-constant sampler index. */

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
      struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* addr = ((sampler * 0x101) + base_binding_table_index) & 0xfff */
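      /* Multiplying by 0x101 replicates the sampler index into both bytes of
       * the descriptor's low word, i.e. into the binding table index (bits
       * 7:0) and sampler index (bits 11:8) fields at once; the AND with
       * 0xfff then truncates the sampler field to 4 bits, mirroring the
       * "sampler % 16" used on the immediate path above.
       */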
      brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
      if (base_binding_table_index)
         brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
      brw_AND(p, addr, addr, brw_imm_ud(0xfff));

      brw_pop_insn_state(p);

      if (inst->base_mrf != -1)
         gen6_resolve_implied_move(p, &src, inst->base_mrf);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, src, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              msg_type,
                              1 /* rlen */,
                              inst->mlen /* mlen */,
                              inst->header_size != 0 /* header */,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              return_format);

      /* The visitor knows more than we do about the required surface limit,
       * so it has already marked the surface as used.
       */
   }
}

static void
generate_vs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_urb_write_allocate(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);

   /* Use the temporary passed in src0 as the writeback register. */
   brw_urb_WRITE(p,
                 inst->src[0].fixed_hw_reg, /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_ALLOCATE_COMPLETE,
                 inst->mlen,
                 1, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   /* Now put allocated urb handle in dst.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(inst->dst.fixed_hw_reg, 0),
           get_element_ud(inst->src[0].fixed_hw_reg, 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_EOT | inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 0, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_set_write_offset(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src0,
                             struct brw_reg src1)
{
   /* From p22 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.3):
    *
    *     Slot 0 Offset. This field, after adding to the Global Offset field
    *     in the message descriptor, specifies the offset (in 256-bit units)
    *     from the start of the URB entry, as referenced by URB Handle 0, at
    *     which the data will be accessed.
    *
    * Similar text describes DWORD M0.4, which is slot 1 offset.
    *
    * Therefore, we want to multiply DWORDs 0 and 4 of src0 (the x components
    * of the register for geometry shader invocations 0 and 1) by the
    * immediate value in src1, and store the result in DWORDs 3 and 4 of dst.
    *
    * We can do this with the following EU instruction:
    *
    *     mul(2) dst.3<1>UD src0<8;2,4>UD src1<...>UW { Align1 WE_all }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   assert(p->devinfo->gen >= 7 &&
          src1.file == BRW_IMMEDIATE_VALUE &&
          src1.type == BRW_REGISTER_TYPE_UD &&
          src1.dw1.ud <= USHRT_MAX);
   if (src0.file == IMM) {
      brw_MOV(p, suboffset(stride(dst, 2, 2, 1), 3),
              brw_imm_ud(src0.dw1.ud * src1.dw1.ud));
   } else {
      brw_MUL(p, suboffset(stride(dst, 2, 2, 1), 3), stride(src0, 8, 2, 4),
              retype(src1, BRW_REGISTER_TYPE_UW));
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_set_vertex_count(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   if (p->devinfo->gen >= 8) {
      /* Move the vertex count into the second MRF for the EOT write. */
      brw_MOV(p, retype(brw_message_reg(dst.nr + 1), BRW_REGISTER_TYPE_UD),
              src);
   } else {
      /* If we think of the src and dst registers as composed of 8 DWORDs
       * each, we want to pick up the contents of DWORDs 0 and 4 from src,
       * truncate them to WORDs, and then pack them into DWORD 2 of dst.
       *
       * It's easier to get the EU to do this if we think of the src and dst
       * registers as composed of 16 WORDs each; then, we want to pick up the
       * contents of WORDs 0 and 8 from src, and pack them into WORDs 4 and 5
       * of dst.
       *
       * We can do that by the following EU instruction:
       *
       *     mov(2) dst.4<1>:uw src<8;1,0>:uw { Align1, Q1, NoMask }
       */
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_MOV(p,
              suboffset(stride(retype(dst, BRW_REGISTER_TYPE_UW), 2, 2, 1), 4),
              stride(retype(src, BRW_REGISTER_TYPE_UW), 8, 1, 0));
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_svb_write(struct brw_codegen *p,
                      struct brw_vue_prog_data *prog_data,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg src0,
                      struct brw_reg src1)
{
   int binding = inst->sol_binding;
   bool final_write = inst->sol_final_write;

   brw_push_insn_state(p);
   /* Copy Vertex data into M0.x */
   brw_MOV(p, stride(dst, 4, 4, 1),
           stride(retype(src0, BRW_REGISTER_TYPE_UD), 4, 4, 1));

   /* Send SVB Write */
   brw_svb_write(p,
                 final_write ? src1 : brw_null_reg(), /* dest == src1 */
                 1, /* msg_reg_nr */
                 dst, /* src0 == previous dst */
                 SURF_INDEX_GEN6_SOL_BINDING(binding), /* binding_table_index */
                 final_write); /* send_commit_msg */

   /* Finally, wait for the write commit to occur so that we can proceed to
    * other things safely.
    *
    * From the Sandybridge PRM, Volume 4, Part 1, Section 3.3:
    *
    *     The write commit does not modify the destination register, but
    *     merely clears the dependency associated with the destination
    *     register. Thus, a simple “mov” instruction using the register as a
    *     source is sufficient to wait for the write commit to occur.
    */
   if (final_write) {
      brw_MOV(p, src1, src1);
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_svb_set_destination_index(struct brw_codegen *p,
                                      vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg src)
{
   int vertex = inst->sol_vertex;
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(dst, 5), get_element_ud(src, vertex));
   brw_pop_insn_state(p);
}

static void
generate_gs_set_dword_2(struct brw_codegen *p,
                        struct brw_reg dst,
                        struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, suboffset(vec1(dst), 2), suboffset(vec1(src), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_prepare_channel_masks(struct brw_codegen *p,
                                  struct brw_reg dst)
{
   /* We want to left shift just DWORD 4 (the x component belonging to the
    * second geometry shader invocation) by 4 bits.  So generate the
    * instruction:
    *
    *     shl(1) dst.4<1>UD dst.4<0;1,0>UD 4UD { align1 WE_all }
    */
   dst = suboffset(vec1(dst), 4);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_SHL(p, dst, dst, brw_imm_ud(4));
   brw_pop_insn_state(p);
}

static void
generate_gs_set_channel_masks(struct brw_codegen *p,
                              struct brw_reg dst,
                              struct brw_reg src)
{
   /* From p21 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.5):
    *
    *     15 Vertex 1 DATA [3] / Vertex 0 DATA[7] Channel Mask
    *
    *        When Swizzle Control = URB_INTERLEAVED this bit controls Vertex 1
    *        DATA[3], when Swizzle Control = URB_NOSWIZZLE this bit controls
    *        Vertex 0 DATA[7].  This bit is ANDed with the corresponding
    *        channel enable to determine the final channel enable.  For the
    *        URB_READ_OWORD & URB_READ_HWORD messages, when final channel
    *        enable is 1 it indicates that Vertex 1 DATA [3] will be included
    *        in the writeback message.  For the URB_WRITE_OWORD &
    *        URB_WRITE_HWORD messages, when final channel enable is 1 it
    *        indicates that Vertex 1 DATA [3] will be written to the surface.
    *
    *        0: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel not included
    *        1: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel included
    *
    *     14 Vertex 1 DATA [2] Channel Mask
    *     13 Vertex 1 DATA [1] Channel Mask
    *     12 Vertex 1 DATA [0] Channel Mask
    *     11 Vertex 0 DATA [3] Channel Mask
    *     10 Vertex 0 DATA [2] Channel Mask
    *      9 Vertex 0 DATA [1] Channel Mask
    *      8 Vertex 0 DATA [0] Channel Mask
    *
    * (This is from a section of the PRM that is agnostic to the particular
    * type of shader being executed, so "Vertex 0" and "Vertex 1" refer to
    * geometry shader invocations 0 and 1, respectively).  Since we have the
    * enable flags for geometry shader invocation 0 in bits 3:0 of DWORD 0,
583 * DWORD 4, we just need to OR them together and store the result in bits
584 * 15:8 of DWORD 5.
585 *
586 * It's easier to get the EU to do this if we think of the src and dst
587 * registers as composed of 32 bytes each; then, we want to pick up the
588 * contents of bytes 0 and 16 from src, OR them together, and store them in
589 * byte 21.
590 *
591 * We can do that by the following EU instruction:
592 *
593 * or(1) dst.21<1>UB src<0,1,0>UB src.16<0,1,0>UB { align1 WE_all }
594 *
595 * Note: this relies on the source register having zeros in (a) bits 7:4 of
596 * DWORD 0 and (b) bits 3:0 of DWORD 4. We can rely on (b) because the
597 * source register was prepared by GS_OPCODE_PREPARE_CHANNEL_MASKS (which
598 * shifts DWORD 4 left by 4 bits), and we can rely on (a) because prior to
599 * the execution of GS_OPCODE_PREPARE_CHANNEL_MASKS, DWORDs 0 and 4 need to
600 * contain valid channel mask values (which are in the range 0x0-0xf).
601 */
602 dst = retype(dst, BRW_REGISTER_TYPE_UB);
603 src = retype(src, BRW_REGISTER_TYPE_UB);
604 brw_push_insn_state(p);
605 brw_set_default_access_mode(p, BRW_ALIGN_1);
606 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
607 brw_OR(p, suboffset(vec1(dst), 21), vec1(src), suboffset(vec1(src), 16));
608 brw_pop_insn_state(p);
609 }
610
611 static void
612 generate_gs_get_instance_id(struct brw_codegen *p,
613 struct brw_reg dst)
614 {
615 /* We want to right shift R0.0 & R0.1 by GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT
616 * and store into dst.0 & dst.4. So generate the instruction:
617 *
618 * shr(8) dst<1> R0<1,4,0> GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT { align1 WE_normal 1Q }
619 */
620 brw_push_insn_state(p);
621 brw_set_default_access_mode(p, BRW_ALIGN_1);
622 dst = retype(dst, BRW_REGISTER_TYPE_UD);
623 struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
624 brw_SHR(p, dst, stride(r0, 1, 4, 0),
625 brw_imm_ud(GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT));
626 brw_pop_insn_state(p);
627 }
628
629 static void
630 generate_gs_ff_sync_set_primitives(struct brw_codegen *p,
631 struct brw_reg dst,
632 struct brw_reg src0,
633 struct brw_reg src1,
634 struct brw_reg src2)
635 {
636 brw_push_insn_state(p);
637 brw_set_default_access_mode(p, BRW_ALIGN_1);
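   /* Note that src2 is clobbered here: it serves as a temporary for masking
    * src1 before the OR below.
    */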
   /* Save src0 data in bits 31:16 of dst.0 */
   brw_AND(p, suboffset(vec1(dst), 0), suboffset(vec1(src0), 0),
           brw_imm_ud(0xffffu));
   brw_SHL(p, suboffset(vec1(dst), 0), suboffset(vec1(dst), 0), brw_imm_ud(16));
   /* Save src1 data in bits 15:0 of dst.0 */
   brw_AND(p, suboffset(vec1(src2), 0), suboffset(vec1(src1), 0),
           brw_imm_ud(0xffffu));
   brw_OR(p, suboffset(vec1(dst), 0),
          suboffset(vec1(dst), 0),
          suboffset(vec1(src2), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_ff_sync(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* This opcode uses an implied MRF register for:
    *  - the header of the ff_sync message, which is therefore expected to
    *    have been initialized to r0 before we get here;
    *  - the destination where we will write the allocated URB handle.
    */
   struct brw_reg header =
      retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);

   /* Overwrite dword 0 of the header (SO vertices to write) and
    * dword 1 (number of primitives written).
    */
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(src1, 0));
   brw_MOV(p, get_element_ud(header, 1), get_element_ud(src0, 0));
   brw_pop_insn_state(p);

   /* Allocate URB handle in dst */
   brw_ff_sync(p,
               dst,
               0,
               header,
               1, /* allocate */
               1, /* response length */
               0 /* eot */);

   /* Now put allocated urb handle in header.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(dst, 0));

   /* src1 is not an immediate when we use transform feedback */
   if (src1.file != BRW_IMMEDIATE_VALUE)
      brw_MOV(p, brw_vec4_grf(src1.nr, 0), brw_vec4_grf(dst.nr, 1));

   brw_pop_insn_state(p);
}

static void
generate_gs_set_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   /* In gen6, PrimitiveID is delivered in R0.1 of the payload */
   struct brw_reg src = brw_vec8_grf(0, 0);
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 0), get_element_ud(src, 1));
   brw_pop_insn_state(p);
}

static void
generate_oword_dual_block_offsets(struct brw_codegen *p,
                                  struct brw_reg m1,
                                  struct brw_reg index)
{
   int second_vertex_offset;

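   /* On gen6+ the OWord Dual Block messages take their per-slot offsets in
    * OWords, so the second vertex's data is one OWord away; before gen6 the
    * message header uses byte offsets, hence 16.
    */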
   if (p->devinfo->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.dw1.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}

static void
generate_unpack_flags(struct brw_codegen *p,
                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

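   /* In SIMD4x2 execution the flag register holds the enables for both
    * halves: bits 3:0 for the first vertex and bits 7:4 for the second.
    * Unpack them into DWORDs 0 and 4 of dst.
    */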
   struct brw_reg flags = brw_flag_reg(0, 0);
   struct brw_reg dst_0 = suboffset(vec1(dst), 0);
   struct brw_reg dst_4 = suboffset(vec1(dst), 4);

   brw_AND(p, dst_0, flags, brw_imm_ud(0x0f));
   brw_AND(p, dst_4, flags, brw_imm_ud(0xf0));
   brw_SHR(p, dst_4, dst_4, brw_imm_ud(4));

   brw_pop_insn_state(p);
}

static void
generate_scratch_read(struct brw_codegen *p,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
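   /* On pre-gen6 hardware the SEND message register number shares its
    * encoding with the conditional modifier field, which is why base_mrf is
    * stuffed in there (the same applies to the sends emitted below).
    */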
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           255, /* binding table index: stateless access */
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}

static void
generate_scratch_write(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg dst,
                       struct brw_reg src,
                       struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_default_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (devinfo->gen >= 7)
      msg_type = GEN7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE;
   else if (devinfo->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_default_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (devinfo->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_write_message(p, send,
                            255, /* binding table index: stateless access */
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}

static void
generate_pull_constant_load(struct brw_codegen *p,
                            struct brw_vue_prog_data *prog_data,
                            vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg index,
                            struct brw_reg offset)
{
   const struct brw_device_info *devinfo = p->devinfo;
   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.dw1.ud;

   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_D),
           offset);

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           surf_index,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);

   brw_mark_surface_used(&prog_data->base, surf_index);
}

static void
generate_get_buffer_size(struct brw_codegen *p,
                         struct brw_vue_prog_data *prog_data,
                         vec4_instruction *inst,
                         struct brw_reg dst,
                         struct brw_reg src,
                         struct brw_reg surf_index)
{
   assert(p->devinfo->gen >= 7);
   assert(surf_index.type == BRW_REGISTER_TYPE_UD &&
          surf_index.file == BRW_IMMEDIATE_VALUE);

   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              surf_index.dw1.ud,
              0,
              GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
              1, /* response length */
              inst->mlen,
              inst->header_size > 0,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              BRW_SAMPLER_RETURN_FORMAT_SINT32);

   brw_mark_surface_used(&prog_data->base, surf_index.dw1.ud);
}

static void
generate_pull_constant_load_gen7(struct brw_codegen *p,
                                 struct brw_vue_prog_data *prog_data,
                                 vec4_instruction *inst,
                                 struct brw_reg dst,
                                 struct brw_reg surf_index,
                                 struct brw_reg offset)
{
   assert(surf_index.type == BRW_REGISTER_TYPE_UD);

   if (surf_index.file == BRW_IMMEDIATE_VALUE) {

      brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, insn, dst);
      brw_set_src0(p, insn, offset);
      brw_set_sampler_message(p, insn,
                              surf_index.dw1.ud,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);

      brw_mark_surface_used(&prog_data->base, surf_index.dw1.ud);

   } else {

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(surf_index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, offset, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1 /* rlen */,
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);

      /* The visitor knows more than we do about the required surface limit,
       * so it has already marked the surface as used.
       */
   }
}

static void
generate_set_simd4x2_header_gen9(struct brw_codegen *p,
                                 vec4_instruction *inst,
                                 struct brw_reg dst)
{
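   /* Copy r0 into the header, then set the SKL+ SIMD4x2 extension bit in
    * DWord 2 (see the corresponding header setup in generate_tex()).
    */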
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_MOV(p, vec8(dst), retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 2),
           brw_imm_ud(GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2));

   brw_pop_insn_state(p);
}

static void
generate_code(struct brw_codegen *p,
              const struct brw_compiler *compiler,
              void *log_data,
              const nir_shader *nir,
              struct brw_vue_prog_data *prog_data,
              const struct cfg_t *cfg)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->stage);
   bool debug_flag = INTEL_DEBUG &
      intel_debug_flag_for_shader_stage(nir->stage);
   struct annotation_info annotation;
   memset(&annotation, 0, sizeof(annotation));
   int loop_count = 0;

   foreach_block_and_inst (block, vec4_instruction, inst, cfg) {
      struct brw_reg src[3], dst;

      if (unlikely(debug_flag))
         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->src[i].fixed_hw_reg;
      }
      dst = inst->dst.fixed_hw_reg;

      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      brw_set_default_acc_write_control(p, inst->writes_accumulator);

      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);

      unsigned pre_emit_nr_insn = p->nr_insn;

      if (dst.width == BRW_WIDTH_4) {
         /* This happens in attribute fixups for "dual instanced" geometry
          * shaders, since they use attributes that are vec4's.  Since the
          * exec width is only 4, it's essential that the caller set
          * force_writemask_all in order to make sure the instruction is
          * executed regardless of which channels are enabled.
          */
         assert(inst->force_writemask_all);

         /* Fix up any <8;8,1> or <0;4,1> source registers to <4;4,1> to
          * satisfy the following register region restrictions (from Graphics
          * BSpec: 3D-Media-GPGPU Engine > EU Overview > Registers and
          * Register Regions > Register Region Restrictions):
          *
          *     1. ExecSize must be greater than or equal to Width.
          *
          *     2. If ExecSize = Width and HorzStride != 0, VertStride must
          *        be set to Width * HorzStride.
          */
         for (int i = 0; i < 3; i++) {
            if (src[i].file == BRW_GENERAL_REGISTER_FILE)
               src[i] = stride(src[i], 4, 4, 1);
         }
      }

      switch (inst->opcode) {
      case VEC4_OPCODE_UNPACK_UNIFORM:
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         brw_MACH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MAD:
         assert(devinfo->gen >= 6);
         brw_MAD(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DPH:
         brw_DPH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP4:
         brw_DP4(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP3:
         brw_DP3(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP2:
         brw_DP2(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_F32TO16:
         assert(devinfo->gen >= 7);
         brw_F32TO16(p, dst, src[0]);
         break;

      case BRW_OPCODE_F16TO32:
         assert(devinfo->gen >= 7);
         brw_F16TO32(p, dst, src[0]);
         break;

      case BRW_OPCODE_LRP:
         assert(devinfo->gen >= 6);
         brw_LRP(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFREV:
         assert(devinfo->gen >= 7);
         /* BFREV only supports UD type for src and dst. */
         brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                   retype(src[0], BRW_REGISTER_TYPE_UD));
         break;
      case BRW_OPCODE_FBH:
         assert(devinfo->gen >= 7);
         /* FBH only supports UD type for dst. */
         brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_FBL:
         assert(devinfo->gen >= 7);
         /* FBL only supports UD type for dst. */
         brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_CBIT:
         assert(devinfo->gen >= 7);
         /* CBIT only supports UD type for dst. */
         brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_ADDC:
         assert(devinfo->gen >= 7);
         brw_ADDC(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SUBB:
         assert(devinfo->gen >= 7);
         brw_SUBB(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MAC:
         brw_MAC(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_BFE:
         assert(devinfo->gen >= 7);
         brw_BFE(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFI1:
         assert(devinfo->gen >= 7);
         brw_BFI1(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_BFI2:
         assert(devinfo->gen >= 7);
         brw_BFI2(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(devinfo->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            brw_inst *if_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst_set_pred_control(p->devinfo, if_inst, inst->predicate);
         }
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, BRW_EXECUTE_8);
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         loop_count++;
         break;

      case SHADER_OPCODE_RCP:
      case SHADER_OPCODE_RSQ:
      case SHADER_OPCODE_SQRT:
      case SHADER_OPCODE_EXP2:
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
                      brw_null_reg());
         } else if (devinfo->gen == 6) {
            generate_math_gen6(p, inst, dst, src[0], brw_null_reg());
         } else {
            generate_math1_gen4(p, inst, dst, src[0]);
         }
         break;

      case SHADER_OPCODE_POW:
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 7) {
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         } else if (devinfo->gen == 6) {
            generate_math_gen6(p, inst, dst, src[0], src[1]);
         } else {
            generate_math2_gen4(p, inst, dst, src[0], src[1]);
         }
         break;

      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXD:
      case SHADER_OPCODE_TXF:
      case SHADER_OPCODE_TXF_CMS:
      case SHADER_OPCODE_TXF_MCS:
      case SHADER_OPCODE_TXL:
      case SHADER_OPCODE_TXS:
      case SHADER_OPCODE_TG4:
      case SHADER_OPCODE_TG4_OFFSET:
      case SHADER_OPCODE_SAMPLEINFO:
         generate_tex(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_URB_WRITE:
         generate_vs_urb_write(p, inst);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
         generate_scratch_read(p, inst, dst, src[0]);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         generate_scratch_write(p, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD:
         generate_pull_constant_load(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
         generate_pull_constant_load_gen7(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
         generate_set_simd4x2_header_gen9(p, inst, dst);
         break;

      case VS_OPCODE_GET_BUFFER_SIZE:
         generate_get_buffer_size(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_URB_WRITE:
         generate_gs_urb_write(p, inst);
         break;

      case GS_OPCODE_URB_WRITE_ALLOCATE:
         generate_gs_urb_write_allocate(p, inst);
         break;

      case GS_OPCODE_SVB_WRITE:
         generate_gs_svb_write(p, prog_data, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_SVB_SET_DST_INDEX:
         generate_gs_svb_set_destination_index(p, inst, dst, src[0]);
         break;

      case GS_OPCODE_THREAD_END:
         generate_gs_thread_end(p, inst);
         break;

      case GS_OPCODE_SET_WRITE_OFFSET:
         generate_gs_set_write_offset(p, dst, src[0], src[1]);
         break;

      case GS_OPCODE_SET_VERTEX_COUNT:
         generate_gs_set_vertex_count(p, dst, src[0]);
         break;

      case GS_OPCODE_FF_SYNC:
         generate_gs_ff_sync(p, inst, dst, src[0], src[1]);
         break;

      case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
         generate_gs_ff_sync_set_primitives(p, dst, src[0], src[1], src[2]);
         break;

      case GS_OPCODE_SET_PRIMITIVE_ID:
         generate_gs_set_primitive_id(p, dst);
         break;

      case GS_OPCODE_SET_DWORD_2:
         generate_gs_set_dword_2(p, dst, src[0]);
         break;

      case GS_OPCODE_PREPARE_CHANNEL_MASKS:
         generate_gs_prepare_channel_masks(p, dst);
         break;

      case GS_OPCODE_SET_CHANNEL_MASKS:
         generate_gs_set_channel_masks(p, dst, src[0]);
         break;

      case GS_OPCODE_GET_INSTANCE_ID:
         generate_gs_get_instance_id(p, dst);
         break;

      case SHADER_OPCODE_SHADER_TIME_ADD:
         brw_shader_time_add(p, src[0],
                             prog_data->base.binding_table.shader_time_start);
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.shader_time_start);
         break;

      case SHADER_OPCODE_UNTYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_atomic(p, dst, src[0], src[1], src[2].dw1.ud, inst->mlen,
                            !inst->dst.is_null());
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_read(p, dst, src[0], src[1], inst->mlen,
                                  src[2].dw1.ud);
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_write(p, src[0], src[1], inst->mlen,
                                   src[2].dw1.ud);
         break;

      case SHADER_OPCODE_TYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_atomic(p, dst, src[0], src[1], src[2].dw1.ud, inst->mlen,
                          !inst->dst.is_null());
         break;

      case SHADER_OPCODE_TYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_read(p, dst, src[0], src[1], inst->mlen,
                                src[2].dw1.ud);
         break;

      case SHADER_OPCODE_TYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_write(p, src[0], src[1], inst->mlen,
                                 src[2].dw1.ud);
         break;

      case SHADER_OPCODE_MEMORY_FENCE:
         brw_memory_fence(p, dst);
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         brw_find_live_channel(p, dst);
         break;

      case SHADER_OPCODE_BROADCAST:
         brw_broadcast(p, dst, src[0], src[1]);
         break;

      case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
         generate_unpack_flags(p, dst);
         break;

      case VEC4_OPCODE_MOV_BYTES: {
         /* Moves the low byte from each channel, using an Align1 access mode
          * and a <4;1,0> source region.
          */
         assert(src[0].type == BRW_REGISTER_TYPE_UB ||
                src[0].type == BRW_REGISTER_TYPE_B);

         brw_set_default_access_mode(p, BRW_ALIGN_1);
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         brw_MOV(p, dst, src[0]);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }

      case VEC4_OPCODE_PACK_BYTES: {
         /* Is effectively:
          *
          *     mov(8) dst<16;4,1>:UB src<4;1,0>:UB
          *
          * but the only regioning available on a destination is a horizontal
          * stride, so instead we have to use two instructions:
          *
          *     mov(4) dst<1>:UB src<4;1,0>:UB
          *     mov(4) dst.16<1>:UB src.16<4;1,0>:UB
          *
          * which pack the four bytes from the low and the high four DWords.
          */
         assert(_mesa_is_pow_two(dst.dw1.bits.writemask) &&
                dst.dw1.bits.writemask != 0);
         unsigned offset = __builtin_ctz(dst.dw1.bits.writemask);

         dst.type = BRW_REGISTER_TYPE_UB;

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         src[0].type = BRW_REGISTER_TYPE_UB;
         src[0].vstride = BRW_VERTICAL_STRIDE_4;
         src[0].width = BRW_WIDTH_1;
         src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
         dst.subnr = offset * 4;
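         /* The two MOVs below write disjoint bytes of the same destination
          * register, so chain their scoreboard dependency hints: the first
          * skips clearing the destination dependency and the second skips
          * checking it.  The IR-level hints are applied on the outer edges
          * of the pair.
          */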
         struct brw_inst *insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, true);
         brw_inst_set_no_dd_check(p->devinfo, insn, inst->no_dd_check);

         src[0].subnr = 16;
         dst.subnr = 16 + offset * 4;
         insn = brw_MOV(p, dst, src[0]);
         brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
         brw_inst_set_no_dd_clear(p->devinfo, insn, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, insn, true);

         brw_set_default_access_mode(p, BRW_ALIGN_16);
         break;
      }

      default:
         unreachable("Unsupported opcode");
      }

      if (inst->opcode == VEC4_OPCODE_PACK_BYTES) {
         /* Handled dependency hints in the generator. */

         assert(!inst->conditional_mod);
      } else if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->nr_insn == pre_emit_nr_insn + 1 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         brw_inst *last = &p->store[pre_emit_nr_insn];

         if (inst->conditional_mod)
            brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
         brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
      }
   }

   brw_set_uip_jip(p);
   annotation_finalize(&annotation, p->next_insn_offset);

   int before_size = p->next_insn_offset;
   brw_compact_instructions(p, 0, annotation.ann_count, annotation.ann);
   int after_size = p->next_insn_offset;

   if (unlikely(debug_flag)) {
      fprintf(stderr, "Native code for %s %s shader %s:\n",
              nir->info.label ? nir->info.label : "unnamed",
              _mesa_shader_stage_to_string(nir->stage), nir->info.name);

      fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. Compacted %d to %d"
              " bytes (%.0f%%)\n",
              stage_abbrev,
              before_size / 16, loop_count, before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, annotation.ann_count, annotation.ann,
                    p->devinfo);
      ralloc_free(annotation.ann);
   }

   compiler->shader_debug_log(log_data,
                              "%s vec4 shader: %d inst, %d loops, "
                              "compacted %d to %d bytes.\n",
                              stage_abbrev, before_size / 16, loop_count,
                              before_size, after_size);
}

extern "C" const unsigned *
brw_vec4_generate_assembly(const struct brw_compiler *compiler,
                           void *log_data,
                           void *mem_ctx,
                           const nir_shader *nir,
                           struct brw_vue_prog_data *prog_data,
                           const struct cfg_t *cfg,
                           unsigned *out_assembly_size)
{
   struct brw_codegen *p = rzalloc(mem_ctx, struct brw_codegen);
   brw_init_codegen(compiler->devinfo, p, mem_ctx);
   brw_set_default_access_mode(p, BRW_ALIGN_16);
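   /* Align16 is the vec4 backend's default access mode; individual opcode
    * generators above switch to Align1 and back as needed.
    */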

   generate_code(p, compiler, log_data, nir, prog_data, cfg);

   return brw_get_program(p, out_assembly_size);
}