intel/compiler: Allow MESA_SHADER_KERNEL
[mesa.git] src/intel/compiler/brw_vec4_generator.cpp
1 /* Copyright © 2011 Intel Corporation
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice (including the next
11 * paragraph) shall be included in all copies or substantial portions of the
12 * Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 * IN THE SOFTWARE.
21 */
22
23 #include "brw_vec4.h"
24 #include "brw_cfg.h"
25 #include "brw_eu.h"
26 #include "dev/gen_debug.h"
27 #include "util/mesa-sha1.h"
28
29 using namespace brw;
30
31 static void
32 generate_math1_gen4(struct brw_codegen *p,
33 vec4_instruction *inst,
34 struct brw_reg dst,
35 struct brw_reg src)
36 {
37 gen4_math(p,
38 dst,
39 brw_math_function(inst->opcode),
40 inst->base_mrf,
41 src,
42 BRW_MATH_PRECISION_FULL);
43 }
44
45 static void
46 check_gen6_math_src_arg(struct brw_reg src)
47 {
48 /* Source swizzles are ignored. */
49 assert(!src.abs);
50 assert(!src.negate);
51 assert(src.swizzle == BRW_SWIZZLE_XYZW);
52 }
53
54 static void
55 generate_math_gen6(struct brw_codegen *p,
56 vec4_instruction *inst,
57 struct brw_reg dst,
58 struct brw_reg src0,
59 struct brw_reg src1)
60 {
61 /* Can't do writemask because math can't be align16. */
62 assert(dst.writemask == WRITEMASK_XYZW);
63 /* Source swizzles are ignored. */
64 check_gen6_math_src_arg(src0);
65 if (src1.file == BRW_GENERAL_REGISTER_FILE)
66 check_gen6_math_src_arg(src1);
67
68 brw_set_default_access_mode(p, BRW_ALIGN_1);
69 gen6_math(p, dst, brw_math_function(inst->opcode), src0, src1);
70 brw_set_default_access_mode(p, BRW_ALIGN_16);
71 }
72
73 static void
74 generate_math2_gen4(struct brw_codegen *p,
75 vec4_instruction *inst,
76 struct brw_reg dst,
77 struct brw_reg src0,
78 struct brw_reg src1)
79 {
80 /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
81 * "Message Payload":
82 *
83 * "Operand0[7]. For the INT DIV functions, this operand is the
84 * denominator."
85 * ...
86 * "Operand1[7]. For the INT DIV functions, this operand is the
87 * numerator."
88 */
89 bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
90 struct brw_reg &op0 = is_int_div ? src1 : src0;
91 struct brw_reg &op1 = is_int_div ? src0 : src1;
92
93 brw_push_insn_state(p);
94 brw_set_default_saturate(p, false);
95 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
96 brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
97 brw_pop_insn_state(p);
98
99 gen4_math(p,
100 dst,
101 brw_math_function(inst->opcode),
102 inst->base_mrf,
103 op0,
104 BRW_MATH_PRECISION_FULL);
105 }
106
107 static void
108 generate_tex(struct brw_codegen *p,
109 struct brw_vue_prog_data *prog_data,
110 gl_shader_stage stage,
111 vec4_instruction *inst,
112 struct brw_reg dst,
113 struct brw_reg src,
114 struct brw_reg surface_index,
115 struct brw_reg sampler_index)
116 {
117 const struct gen_device_info *devinfo = p->devinfo;
118 int msg_type = -1;
119
120 if (devinfo->gen >= 5) {
121 switch (inst->opcode) {
122 case SHADER_OPCODE_TEX:
123 case SHADER_OPCODE_TXL:
124 if (inst->shadow_compare) {
125 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
126 } else {
127 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
128 }
129 break;
130 case SHADER_OPCODE_TXD:
131 if (inst->shadow_compare) {
132 /* Gen7.5+. Otherwise, lowered by brw_lower_texture_gradients(). */
133 assert(devinfo->gen >= 8 || devinfo->is_haswell);
134 msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
135 } else {
136 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
137 }
138 break;
139 case SHADER_OPCODE_TXF:
140 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
141 break;
142 case SHADER_OPCODE_TXF_CMS_W:
143 assert(devinfo->gen >= 9);
144 msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
145 break;
146 case SHADER_OPCODE_TXF_CMS:
147 if (devinfo->gen >= 7)
148 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
149 else
150 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
151 break;
152 case SHADER_OPCODE_TXF_MCS:
153 assert(devinfo->gen >= 7);
154 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
155 break;
156 case SHADER_OPCODE_TXS:
157 msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
158 break;
159 case SHADER_OPCODE_TG4:
160 if (inst->shadow_compare) {
161 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
162 } else {
163 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
164 }
165 break;
166 case SHADER_OPCODE_TG4_OFFSET:
167 if (inst->shadow_compare) {
168 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
169 } else {
170 msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
171 }
172 break;
173 case SHADER_OPCODE_SAMPLEINFO:
174 msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
175 break;
176 default:
177 unreachable("should not get here: invalid vec4 texture opcode");
178 }
179 } else {
180 switch (inst->opcode) {
181 case SHADER_OPCODE_TEX:
182 case SHADER_OPCODE_TXL:
183 if (inst->shadow_compare) {
184 msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
185 assert(inst->mlen == 3);
186 } else {
187 msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
188 assert(inst->mlen == 2);
189 }
190 break;
191 case SHADER_OPCODE_TXD:
192 /* There is no sample_d_c message; comparisons are done manually. */
193 msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
194 assert(inst->mlen == 4);
195 break;
196 case SHADER_OPCODE_TXF:
197 msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
198 assert(inst->mlen == 2);
199 break;
200 case SHADER_OPCODE_TXS:
201 msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
202 assert(inst->mlen == 2);
203 break;
204 default:
205 unreachable("should not get here: invalid vec4 texture opcode");
206 }
207 }
208
209 assert(msg_type != -1);
210
211 assert(sampler_index.type == BRW_REGISTER_TYPE_UD);
212
213 /* Load the message header if present. If there's a texture offset, we need
214 * to set it up explicitly and load the offset bitfield. Otherwise, we can
215 * use an implied move from g0 to the first message register.
216 */
217 if (inst->header_size != 0) {
218 if (devinfo->gen < 6 && !inst->offset) {
219 /* Set up an implied move from g0 to the MRF. */
220 src = brw_vec8_grf(0, 0);
221 } else {
222 struct brw_reg header =
223 retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);
224 uint32_t dw2 = 0;
225
226 /* Explicitly set up the message header by copying g0 to the MRF. */
227 brw_push_insn_state(p);
228 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
229 brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
230
231 brw_set_default_access_mode(p, BRW_ALIGN_1);
232
233 if (inst->offset)
234 /* Set the texel offset bits in DWord 2. */
235 dw2 = inst->offset;
236
237 if (devinfo->gen >= 9)
238 /* SKL+ overloads BRW_SAMPLER_SIMD_MODE_SIMD4X2 to also do SIMD8D,
239 * based on bit 22 in the header.
240 */
241 dw2 |= GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2;
242
243 /* The VS, DS, and FS stages have the g0.2 payload delivered as 0,
244 * so header0.2 is 0 when g0 is copied. The HS and GS stages do
245 * not, so we must set it to 0 to avoid setting undesirable bits
246 * in the message header.
247 */
248 if (dw2 ||
249 stage == MESA_SHADER_TESS_CTRL ||
250 stage == MESA_SHADER_GEOMETRY) {
251 brw_MOV(p, get_element_ud(header, 2), brw_imm_ud(dw2));
252 }
253
254 brw_adjust_sampler_state_pointer(p, header, sampler_index);
255 brw_pop_insn_state(p);
256 }
257 }
258
259 uint32_t return_format;
260
261 switch (dst.type) {
262 case BRW_REGISTER_TYPE_D:
263 return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
264 break;
265 case BRW_REGISTER_TYPE_UD:
266 return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
267 break;
268 default:
269 return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
270 break;
271 }
272
273 /* Stomp the resinfo output type to UINT32. On gens 4-5, the output type
274 * is set as part of the message descriptor. On gen4, the PRM seems to
275 * allow UINT32 and FLOAT32 (i965 PRM, Vol. 4 Section 4.8.1.1), but on
276 * later gens UINT32 is required. Once you hit Sandy Bridge, the bit is
277 * gone from the message descriptor entirely and you just get UINT32 all
278 * the time regardless. Since we can really only do non-UINT32 on gen4,
279 * just stomp it to UINT32 all the time.
280 */
281 if (inst->opcode == SHADER_OPCODE_TXS)
282 return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
283
284 uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
285 inst->opcode == SHADER_OPCODE_TG4_OFFSET)
286 ? prog_data->base.binding_table.gather_texture_start
287 : prog_data->base.binding_table.texture_start;
288
289 if (surface_index.file == BRW_IMMEDIATE_VALUE &&
290 sampler_index.file == BRW_IMMEDIATE_VALUE) {
291 uint32_t surface = surface_index.ud;
292 uint32_t sampler = sampler_index.ud;
293
294 brw_SAMPLE(p,
295 dst,
296 inst->base_mrf,
297 src,
298 surface + base_binding_table_index,
299 sampler % 16,
300 msg_type,
301 1, /* response length */
302 inst->mlen,
303 inst->header_size != 0,
304 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
305 return_format);
306 } else {
307 /* Non-constant sampler index. */
308
309 struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
310 struct brw_reg surface_reg = vec1(retype(surface_index, BRW_REGISTER_TYPE_UD));
311 struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));
312
313 brw_push_insn_state(p);
314 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
315 brw_set_default_access_mode(p, BRW_ALIGN_1);
316
317 if (brw_regs_equal(&surface_reg, &sampler_reg)) {
318 brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
319 } else {
320 if (sampler_reg.file == BRW_IMMEDIATE_VALUE) {
321 brw_OR(p, addr, surface_reg, brw_imm_ud(sampler_reg.ud << 8));
322 } else {
323 brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
324 brw_OR(p, addr, addr, surface_reg);
325 }
326 }
327 if (base_binding_table_index)
328 brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
329 brw_AND(p, addr, addr, brw_imm_ud(0xfff));
330
331 brw_pop_insn_state(p);
332
333 if (inst->base_mrf != -1)
334 gen6_resolve_implied_move(p, &src, inst->base_mrf);
335
336 /* dst = send(offset, a0.0 | <descriptor>) */
337 brw_send_indirect_message(
338 p, BRW_SFID_SAMPLER, dst, src, addr,
339 brw_message_desc(devinfo, inst->mlen, 1, inst->header_size) |
340 brw_sampler_desc(devinfo,
341 0 /* surface */,
342 0 /* sampler */,
343 msg_type,
344 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
345 return_format),
346 false /* EOT */);
347
348 /* The visitor knows more than we do about the surface limit required,
349 * so it has already done the marking.
350 */
351 }
352 }
353
354 static void
355 generate_vs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
356 {
357 brw_urb_WRITE(p,
358 brw_null_reg(), /* dest */
359 inst->base_mrf, /* starting mrf reg nr */
360 brw_vec8_grf(0, 0), /* src */
361 inst->urb_write_flags,
362 inst->mlen,
363 0, /* response len */
364 inst->offset, /* urb destination offset */
365 BRW_URB_SWIZZLE_INTERLEAVE);
366 }
367
368 static void
369 generate_gs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
370 {
371 struct brw_reg src = brw_message_reg(inst->base_mrf);
372 brw_urb_WRITE(p,
373 brw_null_reg(), /* dest */
374 inst->base_mrf, /* starting mrf reg nr */
375 src,
376 inst->urb_write_flags,
377 inst->mlen,
378 0, /* response len */
379 inst->offset, /* urb destination offset */
380 BRW_URB_SWIZZLE_INTERLEAVE);
381 }
382
383 static void
384 generate_gs_urb_write_allocate(struct brw_codegen *p, vec4_instruction *inst)
385 {
386 struct brw_reg src = brw_message_reg(inst->base_mrf);
387
388 /* Use the temporary passed in src0 as the writeback register. */
389 brw_urb_WRITE(p,
390 inst->src[0].as_brw_reg(), /* dest */
391 inst->base_mrf, /* starting mrf reg nr */
392 src,
393 BRW_URB_WRITE_ALLOCATE_COMPLETE,
394 inst->mlen,
395 1, /* response len */
396 inst->offset, /* urb destination offset */
397 BRW_URB_SWIZZLE_INTERLEAVE);
398
399 /* Now put allocated urb handle in dst.0 */
400 brw_push_insn_state(p);
401 brw_set_default_access_mode(p, BRW_ALIGN_1);
402 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
403 brw_MOV(p, get_element_ud(inst->dst.as_brw_reg(), 0),
404 get_element_ud(inst->src[0].as_brw_reg(), 0));
405 brw_pop_insn_state(p);
406 }
407
408 static void
409 generate_gs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
410 {
411 struct brw_reg src = brw_message_reg(inst->base_mrf);
412 brw_urb_WRITE(p,
413 brw_null_reg(), /* dest */
414 inst->base_mrf, /* starting mrf reg nr */
415 src,
416 BRW_URB_WRITE_EOT | inst->urb_write_flags,
417 inst->mlen,
418 0, /* response len */
419 0, /* urb destination offset */
420 BRW_URB_SWIZZLE_INTERLEAVE);
421 }
422
423 static void
424 generate_gs_set_write_offset(struct brw_codegen *p,
425 struct brw_reg dst,
426 struct brw_reg src0,
427 struct brw_reg src1)
428 {
429 /* From p22 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
430 * Header: M0.3):
431 *
432 * Slot 0 Offset. This field, after adding to the Global Offset field
433 * in the message descriptor, specifies the offset (in 256-bit units)
434 * from the start of the URB entry, as referenced by URB Handle 0, at
435 * which the data will be accessed.
436 *
437 * Similar text describes DWORD M0.4, which is slot 1 offset.
438 *
439 * Therefore, we want to multiply DWORDs 0 and 4 of src0 (the x components
440 * of the register for geometry shader invocations 0 and 1) by the
441 * immediate value in src1, and store the result in DWORDs 3 and 4 of dst.
442 *
443 * We can do this with the following EU instruction:
444 *
445 * mul(2) dst.3<1>UD src0<8;2,4>UD src1<...>UW { Align1 WE_all }
446 */
447 brw_push_insn_state(p);
448 brw_set_default_access_mode(p, BRW_ALIGN_1);
449 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
450 assert(p->devinfo->gen >= 7 &&
451 src1.file == BRW_IMMEDIATE_VALUE &&
452 src1.type == BRW_REGISTER_TYPE_UD &&
453 src1.ud <= USHRT_MAX);
454 if (src0.file == BRW_IMMEDIATE_VALUE) {
455 brw_MOV(p, suboffset(stride(dst, 2, 2, 1), 3),
456 brw_imm_ud(src0.ud * src1.ud));
457 } else {
458 if (src1.file == BRW_IMMEDIATE_VALUE) {
459 src1 = brw_imm_uw(src1.ud);
460 }
461 brw_MUL(p, suboffset(stride(dst, 2, 2, 1), 3), stride(src0, 8, 2, 4),
462 retype(src1, BRW_REGISTER_TYPE_UW));
463 }
464 brw_pop_insn_state(p);
465 }
466
467 static void
468 generate_gs_set_vertex_count(struct brw_codegen *p,
469 struct brw_reg dst,
470 struct brw_reg src)
471 {
472 brw_push_insn_state(p);
473 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
474
475 if (p->devinfo->gen >= 8) {
476 /* Move the vertex count into the second MRF for the EOT write. */
477 brw_MOV(p, retype(brw_message_reg(dst.nr + 1), BRW_REGISTER_TYPE_UD),
478 src);
479 } else {
480 /* If we think of the src and dst registers as composed of 8 DWORDs each,
481 * we want to pick up the contents of DWORDs 0 and 4 from src, truncate
482 * them to WORDs, and then pack them into DWORD 2 of dst.
483 *
484 * It's easier to get the EU to do this if we think of the src and dst
485 * registers as composed of 16 WORDS each; then, we want to pick up the
486 * contents of WORDs 0 and 8 from src, and pack them into WORDs 4 and 5
487 * of dst.
488 *
489 * We can do that by the following EU instruction:
490 *
491 * mov (2) dst.4<1>:uw src<8;1,0>:uw { Align1, Q1, NoMask }
492 */
493 brw_set_default_access_mode(p, BRW_ALIGN_1);
494 brw_MOV(p,
495 suboffset(stride(retype(dst, BRW_REGISTER_TYPE_UW), 2, 2, 1), 4),
496 stride(retype(src, BRW_REGISTER_TYPE_UW), 8, 1, 0));
497 }
498 brw_pop_insn_state(p);
499 }
500
501 static void
502 generate_gs_svb_write(struct brw_codegen *p,
503 struct brw_vue_prog_data *prog_data,
504 vec4_instruction *inst,
505 struct brw_reg dst,
506 struct brw_reg src0,
507 struct brw_reg src1)
508 {
509 int binding = inst->sol_binding;
510 bool final_write = inst->sol_final_write;
511
512 brw_push_insn_state(p);
513 brw_set_default_exec_size(p, BRW_EXECUTE_4);
514 /* Copy Vertex data into M0.x */
515 brw_MOV(p, stride(dst, 4, 4, 1),
516 stride(retype(src0, BRW_REGISTER_TYPE_UD), 4, 4, 1));
517 brw_pop_insn_state(p);
518
519 brw_push_insn_state(p);
520 /* Send SVB Write */
521 brw_svb_write(p,
522 final_write ? src1 : brw_null_reg(), /* dest == src1 */
523 1, /* msg_reg_nr */
524 dst, /* src0 == previous dst */
525 BRW_GEN6_SOL_BINDING_START + binding, /* binding_table_index */
526 final_write); /* send_commit_msg */
527
528 /* Finally, wait for the write commit to occur so that we can proceed to
529 * other things safely.
530 *
531 * From the Sandybridge PRM, Volume 4, Part 1, Section 3.3:
532 *
533 * The write commit does not modify the destination register, but
534 * merely clears the dependency associated with the destination
535 * register. Thus, a simple “mov” instruction using the register as a
536 * source is sufficient to wait for the write commit to occur.
537 */
538 if (final_write) {
539 brw_MOV(p, src1, src1);
540 }
541 brw_pop_insn_state(p);
542 }
543
544 static void
545 generate_gs_svb_set_destination_index(struct brw_codegen *p,
546 vec4_instruction *inst,
547 struct brw_reg dst,
548 struct brw_reg src)
549 {
550 int vertex = inst->sol_vertex;
551 brw_push_insn_state(p);
552 brw_set_default_access_mode(p, BRW_ALIGN_1);
553 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
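/* DWord 5 of the SVB write header carries the destination index; take it
 * from the channel of src that corresponds to this SOL vertex.
 */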
554 brw_MOV(p, get_element_ud(dst, 5), get_element_ud(src, vertex));
555 brw_pop_insn_state(p);
556 }
557
558 static void
559 generate_gs_set_dword_2(struct brw_codegen *p,
560 struct brw_reg dst,
561 struct brw_reg src)
562 {
563 brw_push_insn_state(p);
564 brw_set_default_access_mode(p, BRW_ALIGN_1);
565 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
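/* Copy the x component of src into DWord 2 of dst with a single-channel
 * Align1 MOV.
 */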
566 brw_MOV(p, suboffset(vec1(dst), 2), suboffset(vec1(src), 0));
567 brw_pop_insn_state(p);
568 }
569
570 static void
571 generate_gs_prepare_channel_masks(struct brw_codegen *p,
572 struct brw_reg dst)
573 {
574 /* We want to left shift just DWORD 4 (the x component belonging to the
575 * second geometry shader invocation) by 4 bits. So generate the
576 * instruction:
577 *
578 * shl(1) dst.4<1>UD dst.4<0,1,0>UD 4UD { align1 WE_all }
579 */
580 dst = suboffset(vec1(dst), 4);
581 brw_push_insn_state(p);
582 brw_set_default_access_mode(p, BRW_ALIGN_1);
583 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
584 brw_SHL(p, dst, dst, brw_imm_ud(4));
585 brw_pop_insn_state(p);
586 }
587
588 static void
589 generate_gs_set_channel_masks(struct brw_codegen *p,
590 struct brw_reg dst,
591 struct brw_reg src)
592 {
593 /* From p21 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
594 * Header: M0.5):
595 *
596 * 15 Vertex 1 DATA [3] / Vertex 0 DATA[7] Channel Mask
597 *
598 * When Swizzle Control = URB_INTERLEAVED this bit controls Vertex 1
599 * DATA[3], when Swizzle Control = URB_NOSWIZZLE this bit controls
600 * Vertex 0 DATA[7]. This bit is ANDed with the corresponding
601 * channel enable to determine the final channel enable. For the
602 * URB_READ_OWORD & URB_READ_HWORD messages, when final channel
603 * enable is 1 it indicates that Vertex 1 DATA [3] will be included
604 * in the writeback message. For the URB_WRITE_OWORD &
605 * URB_WRITE_HWORD messages, when final channel enable is 1 it
606 * indicates that Vertex 1 DATA [3] will be written to the surface.
607 *
608 * 0: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel not included
609 * 1: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel included
610 *
611 * 14 Vertex 1 DATA [2] Channel Mask
612 * 13 Vertex 1 DATA [1] Channel Mask
613 * 12 Vertex 1 DATA [0] Channel Mask
614 * 11 Vertex 0 DATA [3] Channel Mask
615 * 10 Vertex 0 DATA [2] Channel Mask
616 * 9 Vertex 0 DATA [1] Channel Mask
617 * 8 Vertex 0 DATA [0] Channel Mask
618 *
619 * (This is from a section of the PRM that is agnostic to the particular
620 * type of shader being executed, so "Vertex 0" and "Vertex 1" refer to
621 * geometry shader invocations 0 and 1, respectively). Since we have the
622 * enable flags for geometry shader invocation 0 in bits 3:0 of DWORD 0,
623 * and the enable flags for geometry shader invocation 1 in bits 7:0 of
624 * DWORD 4, we just need to OR them together and store the result in bits
625 * 15:8 of DWORD 5.
626 *
627 * It's easier to get the EU to do this if we think of the src and dst
628 * registers as composed of 32 bytes each; then, we want to pick up the
629 * contents of bytes 0 and 16 from src, OR them together, and store them in
630 * byte 21.
631 *
632 * We can do that by the following EU instruction:
633 *
634 * or(1) dst.21<1>UB src<0,1,0>UB src.16<0,1,0>UB { align1 WE_all }
635 *
636 * Note: this relies on the source register having zeros in (a) bits 7:4 of
637 * DWORD 0 and (b) bits 3:0 of DWORD 4. We can rely on (b) because the
638 * source register was prepared by GS_OPCODE_PREPARE_CHANNEL_MASKS (which
639 * shifts DWORD 4 left by 4 bits), and we can rely on (a) because prior to
640 * the execution of GS_OPCODE_PREPARE_CHANNEL_MASKS, DWORDs 0 and 4 need to
641 * contain valid channel mask values (which are in the range 0x0-0xf).
642 */
643 dst = retype(dst, BRW_REGISTER_TYPE_UB);
644 src = retype(src, BRW_REGISTER_TYPE_UB);
645 brw_push_insn_state(p);
646 brw_set_default_access_mode(p, BRW_ALIGN_1);
647 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
648 brw_OR(p, suboffset(vec1(dst), 21), vec1(src), suboffset(vec1(src), 16));
649 brw_pop_insn_state(p);
650 }
651
652 static void
653 generate_gs_get_instance_id(struct brw_codegen *p,
654 struct brw_reg dst)
655 {
656 /* We want to right shift R0.0 & R0.1 by GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT
657 * and store into dst.0 & dst.4. So generate the instruction:
658 *
659 * shr(8) dst<1> R0<1,4,0> GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT { align1 WE_normal 1Q }
660 */
661 brw_push_insn_state(p);
662 brw_set_default_access_mode(p, BRW_ALIGN_1);
663 dst = retype(dst, BRW_REGISTER_TYPE_UD);
664 struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
665 brw_SHR(p, dst, stride(r0, 1, 4, 0),
666 brw_imm_ud(GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT));
667 brw_pop_insn_state(p);
668 }
669
670 static void
671 generate_gs_ff_sync_set_primitives(struct brw_codegen *p,
672 struct brw_reg dst,
673 struct brw_reg src0,
674 struct brw_reg src1,
675 struct brw_reg src2)
676 {
677 brw_push_insn_state(p);
678 brw_set_default_access_mode(p, BRW_ALIGN_1);
679 /* Save src0 data in bits 31:16 of dst.0 */
680 brw_AND(p, suboffset(vec1(dst), 0), suboffset(vec1(src0), 0),
681 brw_imm_ud(0xffffu));
682 brw_SHL(p, suboffset(vec1(dst), 0), suboffset(vec1(dst), 0), brw_imm_ud(16));
683 /* Save src1 data in bits 15:0 of dst.0 */
684 brw_AND(p, suboffset(vec1(src2), 0), suboffset(vec1(src1), 0),
685 brw_imm_ud(0xffffu));
686 brw_OR(p, suboffset(vec1(dst), 0),
687 suboffset(vec1(dst), 0),
688 suboffset(vec1(src2), 0));
689 brw_pop_insn_state(p);
690 }
691
692 static void
693 generate_gs_ff_sync(struct brw_codegen *p,
694 vec4_instruction *inst,
695 struct brw_reg dst,
696 struct brw_reg src0,
697 struct brw_reg src1)
698 {
699 /* This opcode uses an implied MRF register for:
700 * - the header of the ff_sync message, which is expected to have been
701 * initialized to r0 before calling here.
702 * - the destination where we will write the allocated URB handle.
703 */
704 struct brw_reg header =
705 retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);
706
707 /* Overwrite dword 0 of the header (SO vertices to write) and
708 * dword 1 (number of primitives written).
709 */
710 brw_push_insn_state(p);
711 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
712 brw_set_default_access_mode(p, BRW_ALIGN_1);
713 brw_MOV(p, get_element_ud(header, 0), get_element_ud(src1, 0));
714 brw_MOV(p, get_element_ud(header, 1), get_element_ud(src0, 0));
715 brw_pop_insn_state(p);
716
717 /* Allocate URB handle in dst */
718 brw_ff_sync(p,
719 dst,
720 0,
721 header,
722 1, /* allocate */
723 1, /* response length */
724 0 /* eot */);
725
726 /* Now put allocated urb handle in header.0 */
727 brw_push_insn_state(p);
728 brw_set_default_access_mode(p, BRW_ALIGN_1);
729 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
730 brw_MOV(p, get_element_ud(header, 0), get_element_ud(dst, 0));
731
732 /* src1 is not an immediate when we use transform feedback */
733 if (src1.file != BRW_IMMEDIATE_VALUE) {
734 brw_set_default_exec_size(p, BRW_EXECUTE_4);
735 brw_MOV(p, brw_vec4_grf(src1.nr, 0), brw_vec4_grf(dst.nr, 1));
736 }
737
738 brw_pop_insn_state(p);
739 }
740
741 static void
742 generate_gs_set_primitive_id(struct brw_codegen *p, struct brw_reg dst)
743 {
744 /* In gen6, PrimitiveID is delivered in R0.1 of the payload */
745 struct brw_reg src = brw_vec8_grf(0, 0);
746 brw_push_insn_state(p);
747 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
748 brw_set_default_access_mode(p, BRW_ALIGN_1);
749 brw_MOV(p, get_element_ud(dst, 0), get_element_ud(src, 1));
750 brw_pop_insn_state(p);
751 }
752
753 static void
754 generate_tcs_get_instance_id(struct brw_codegen *p, struct brw_reg dst)
755 {
756 const struct gen_device_info *devinfo = p->devinfo;
757 const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;
758
759 /* "Instance Count" comes as part of the payload in r0.2 bits 23:17.
760 *
761 * Since we operate in SIMD4x2 mode, we need to run half as many threads
762 * as necessary. So we assign (2i + 1, 2i) as the thread counts. We
763 * shift right by one less to accomplish the multiplication by two.
764 */
765 dst = retype(dst, BRW_REGISTER_TYPE_UD);
766 struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
767
768 brw_push_insn_state(p);
769 brw_set_default_access_mode(p, BRW_ALIGN_1);
770
771 const int mask = ivb ? INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
772 const int shift = ivb ? 16 : 17;
773
774 brw_AND(p, get_element_ud(dst, 0), get_element_ud(r0, 2), brw_imm_ud(mask));
775 brw_SHR(p, get_element_ud(dst, 0), get_element_ud(dst, 0),
776 brw_imm_ud(shift - 1));
777 brw_ADD(p, get_element_ud(dst, 4), get_element_ud(dst, 0), brw_imm_ud(1));
778
779 brw_pop_insn_state(p);
780 }
781
782 static void
783 generate_tcs_urb_write(struct brw_codegen *p,
784 vec4_instruction *inst,
785 struct brw_reg urb_header)
786 {
787 const struct gen_device_info *devinfo = p->devinfo;
788
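/* Emit the URB write as a raw SEND: an OWord write using the header in
 * urb_header, marked EOT on the thread's final write and using interleaved
 * per-slot offsets otherwise.
 */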
789 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
790 brw_set_dest(p, send, brw_null_reg());
791 brw_set_src0(p, send, urb_header);
792 brw_set_desc(p, send, brw_message_desc(devinfo, inst->mlen, 0, true));
793
794 brw_inst_set_sfid(devinfo, send, BRW_SFID_URB);
795 brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_WRITE_OWORD);
796 brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
797 if (inst->urb_write_flags & BRW_URB_WRITE_EOT) {
798 brw_inst_set_eot(devinfo, send, 1);
799 } else {
800 brw_inst_set_urb_per_slot_offset(devinfo, send, 1);
801 brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
802 }
803
804 /* what happens to swizzles? */
805 }
806
807
808 static void
809 generate_tcs_input_urb_offsets(struct brw_codegen *p,
810 struct brw_reg dst,
811 struct brw_reg vertex,
812 struct brw_reg offset)
813 {
814 /* Generates an URB read/write message header for HS/DS operation.
815 * Inputs are a vertex index, and a byte offset from the beginning of
816 * the vertex. */
817
818 /* If `vertex` is not an immediate, we clobber a0.0 */
819
820 assert(vertex.file == BRW_IMMEDIATE_VALUE || vertex.file == BRW_GENERAL_REGISTER_FILE);
821 assert(vertex.type == BRW_REGISTER_TYPE_UD || vertex.type == BRW_REGISTER_TYPE_D);
822
823 assert(dst.file == BRW_GENERAL_REGISTER_FILE);
824
825 brw_push_insn_state(p);
826 brw_set_default_access_mode(p, BRW_ALIGN_1);
827 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
828 brw_MOV(p, dst, brw_imm_ud(0));
829
830 /* m0.5 bits 8-15 are channel enables */
831 brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));
832
833 /* m0.0-0.1: URB handles */
834 if (vertex.file == BRW_IMMEDIATE_VALUE) {
835 uint32_t vertex_index = vertex.ud;
836 struct brw_reg index_reg = brw_vec1_grf(
837 1 + (vertex_index >> 3), vertex_index & 7);
838
839 brw_MOV(p, vec2(get_element_ud(dst, 0)),
840 retype(index_reg, BRW_REGISTER_TYPE_UD));
841 } else {
842 /* Use indirect addressing. ICP Handles are DWords (single channels
843 * of a register) and start at g1.0.
844 *
845 * In order to start our region at g1.0, we add 8 to the vertex index,
846 * effectively skipping over the 8 channels in g0.0. This gives us a
847 * DWord offset to the ICP Handle.
848 *
849 * Indirect addressing works in terms of bytes, so we then multiply
850 * the DWord offset by 4 (by shifting left by 2).
851 */
852 struct brw_reg addr = brw_address_reg(0);
853
854 /* bottom half: m0.0 = g[1.0 + vertex.0]UD */
855 brw_ADD(p, addr, retype(get_element_ud(vertex, 0), BRW_REGISTER_TYPE_UW),
856 brw_imm_uw(0x8));
857 brw_SHL(p, addr, addr, brw_imm_uw(2));
858 brw_MOV(p, get_element_ud(dst, 0), deref_1ud(brw_indirect(0, 0), 0));
859
860 /* top half: m0.1 = g[1.0 + vertex.4]UD */
861 brw_ADD(p, addr, retype(get_element_ud(vertex, 4), BRW_REGISTER_TYPE_UW),
862 brw_imm_uw(0x8));
863 brw_SHL(p, addr, addr, brw_imm_uw(2));
864 brw_MOV(p, get_element_ud(dst, 1), deref_1ud(brw_indirect(0, 0), 0));
865 }
866
867 /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
868 if (offset.file != ARF)
869 brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));
870
871 brw_pop_insn_state(p);
872 }
873
874
875 static void
876 generate_tcs_output_urb_offsets(struct brw_codegen *p,
877 struct brw_reg dst,
878 struct brw_reg write_mask,
879 struct brw_reg offset)
880 {
881 /* Generates an URB read/write message header for HS/DS operation, for the patch URB entry. */
882 assert(dst.file == BRW_GENERAL_REGISTER_FILE || dst.file == BRW_MESSAGE_REGISTER_FILE);
883
884 assert(write_mask.file == BRW_IMMEDIATE_VALUE);
885 assert(write_mask.type == BRW_REGISTER_TYPE_UD);
886
887 brw_push_insn_state(p);
888
889 brw_set_default_access_mode(p, BRW_ALIGN_1);
890 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
891 brw_MOV(p, dst, brw_imm_ud(0));
892
893 unsigned mask = write_mask.ud;
894
895 /* m0.5 bits 15:12 and 11:8 are channel enables */
896 brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud((mask << 8) | (mask << 12)));
897
898 /* HS patch URB handle is delivered in r0.0 */
899 struct brw_reg urb_handle = brw_vec1_grf(0, 0);
900
901 /* m0.0-0.1: URB handles */
902 brw_MOV(p, vec2(get_element_ud(dst, 0)),
903 retype(urb_handle, BRW_REGISTER_TYPE_UD));
904
905 /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
906 if (offset.file != ARF)
907 brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));
908
909 brw_pop_insn_state(p);
910 }
911
912 static void
913 generate_tes_create_input_read_header(struct brw_codegen *p,
914 struct brw_reg dst)
915 {
916 brw_push_insn_state(p);
917 brw_set_default_access_mode(p, BRW_ALIGN_1);
918 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
919
920 /* Initialize the register to 0 */
921 brw_MOV(p, dst, brw_imm_ud(0));
922
923 /* Enable all the channels in m0.5 bits 15:8 */
924 brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));
925
926 /* Copy g1.3 (the patch URB handle) to m0.0 and m0.1. For safety,
927 * mask out irrelevant "Reserved" bits, as they're not marked MBZ.
928 */
929 brw_AND(p, vec2(get_element_ud(dst, 0)),
930 retype(brw_vec1_grf(1, 3), BRW_REGISTER_TYPE_UD),
931 brw_imm_ud(0x1fff));
932 brw_pop_insn_state(p);
933 }
934
935 static void
936 generate_tes_add_indirect_urb_offset(struct brw_codegen *p,
937 struct brw_reg dst,
938 struct brw_reg header,
939 struct brw_reg offset)
940 {
941 brw_push_insn_state(p);
942 brw_set_default_access_mode(p, BRW_ALIGN_1);
943 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
944
945 brw_MOV(p, dst, header);
946
947 /* Uniforms will have a stride <0;4,1>, and we need to convert to <0;1,0>.
948 * Other values get <4;1,0>.
949 */
950 struct brw_reg restrided_offset;
951 if (offset.vstride == BRW_VERTICAL_STRIDE_0 &&
952 offset.width == BRW_WIDTH_4 &&
953 offset.hstride == BRW_HORIZONTAL_STRIDE_1) {
954 restrided_offset = stride(offset, 0, 1, 0);
955 } else {
956 restrided_offset = stride(offset, 4, 1, 0);
957 }
958
959 /* m0.3-0.4: 128-bit-granular offsets into the URB from the handles */
960 brw_MOV(p, vec2(get_element_ud(dst, 3)), restrided_offset);
961
962 brw_pop_insn_state(p);
963 }
964
965 static void
966 generate_vec4_urb_read(struct brw_codegen *p,
967 vec4_instruction *inst,
968 struct brw_reg dst,
969 struct brw_reg header)
970 {
971 const struct gen_device_info *devinfo = p->devinfo;
972
973 assert(header.file == BRW_GENERAL_REGISTER_FILE);
974 assert(header.type == BRW_REGISTER_TYPE_UD);
975
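/* Interleaved OWord URB read: one message register (the header built by
 * the visitor) goes out and one register of data comes back.
 */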
976 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
977 brw_set_dest(p, send, dst);
978 brw_set_src0(p, send, header);
979
980 brw_set_desc(p, send, brw_message_desc(devinfo, 1, 1, true));
981
982 brw_inst_set_sfid(devinfo, send, BRW_SFID_URB);
983 brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
984 brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
985 brw_inst_set_urb_per_slot_offset(devinfo, send, 1);
986
987 brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
988 }
989
990 static void
991 generate_tcs_release_input(struct brw_codegen *p,
992 struct brw_reg header,
993 struct brw_reg vertex,
994 struct brw_reg is_unpaired)
995 {
996 const struct gen_device_info *devinfo = p->devinfo;
997
998 assert(vertex.file == BRW_IMMEDIATE_VALUE);
999 assert(vertex.type == BRW_REGISTER_TYPE_UD);
1000
1001 /* m0.0-0.1: URB handles */
1002 struct brw_reg urb_handles =
1003 retype(brw_vec2_grf(1 + (vertex.ud >> 3), vertex.ud & 7),
1004 BRW_REGISTER_TYPE_UD);
1005
1006 brw_push_insn_state(p);
1007 brw_set_default_access_mode(p, BRW_ALIGN_1);
1008 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1009 brw_MOV(p, header, brw_imm_ud(0));
1010 brw_MOV(p, vec2(get_element_ud(header, 0)), urb_handles);
1011 brw_pop_insn_state(p);
1012
1013 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1014 brw_set_dest(p, send, brw_null_reg());
1015 brw_set_src0(p, send, header);
1016 brw_set_desc(p, send, brw_message_desc(devinfo, 1, 0, true));
1017
1018 brw_inst_set_sfid(devinfo, send, BRW_SFID_URB);
1019 brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
1020 brw_inst_set_urb_complete(devinfo, send, 1);
1021 brw_inst_set_urb_swizzle_control(devinfo, send, is_unpaired.ud ?
1022 BRW_URB_SWIZZLE_NONE :
1023 BRW_URB_SWIZZLE_INTERLEAVE);
1024 }
1025
1026 static void
1027 generate_tcs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
1028 {
1029 struct brw_reg header = brw_message_reg(inst->base_mrf);
1030
1031 brw_push_insn_state(p);
1032 brw_set_default_access_mode(p, BRW_ALIGN_1);
1033 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1034 brw_MOV(p, header, brw_imm_ud(0));
1035 brw_MOV(p, get_element_ud(header, 5), brw_imm_ud(WRITEMASK_X << 8));
1036 brw_MOV(p, get_element_ud(header, 0),
1037 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD));
1038 brw_MOV(p, brw_message_reg(inst->base_mrf + 1), brw_imm_ud(0u));
1039 brw_pop_insn_state(p);
1040
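/* End the thread with an EOT URB write: patch handle from r0.0, channel
 * mask X only, and a single zero dword as the data payload.
 */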
1041 brw_urb_WRITE(p,
1042 brw_null_reg(), /* dest */
1043 inst->base_mrf, /* starting mrf reg nr */
1044 header,
1045 BRW_URB_WRITE_EOT | BRW_URB_WRITE_OWORD |
1046 BRW_URB_WRITE_USE_CHANNEL_MASKS,
1047 inst->mlen,
1048 0, /* response len */
1049 0, /* urb destination offset */
1050 0);
1051 }
1052
1053 static void
1054 generate_tes_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
1055 {
1056 brw_push_insn_state(p);
1057 brw_set_default_access_mode(p, BRW_ALIGN_1);
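/* The DS payload delivers PrimitiveID in g1.7; copy it into dst. */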
1058 brw_MOV(p, dst, retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_D));
1059 brw_pop_insn_state(p);
1060 }
1061
1062 static void
1063 generate_tcs_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
1064 {
1065 brw_push_insn_state(p);
1066 brw_set_default_access_mode(p, BRW_ALIGN_1);
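/* The HS payload delivers PrimitiveID in r0.1; copy it into dst. */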
1067 brw_MOV(p, dst, retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
1068 brw_pop_insn_state(p);
1069 }
1070
1071 static void
1072 generate_tcs_create_barrier_header(struct brw_codegen *p,
1073 struct brw_vue_prog_data *prog_data,
1074 struct brw_reg dst)
1075 {
1076 const struct gen_device_info *devinfo = p->devinfo;
1077 const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;
1078 struct brw_reg m0_2 = get_element_ud(dst, 2);
1079 unsigned instances = ((struct brw_tcs_prog_data *) prog_data)->instances;
1080
1081 brw_push_insn_state(p);
1082 brw_set_default_access_mode(p, BRW_ALIGN_1);
1083 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1084
1085 /* Zero the message header */
1086 brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
1087
1088 /* Copy "Barrier ID" from r0.2, bits 16:13 (Gen7.5+) or 15:12 (Gen7) */
1089 brw_AND(p, m0_2,
1090 retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
1091 brw_imm_ud(ivb ? INTEL_MASK(15, 12) : INTEL_MASK(16, 13)));
1092
1093 /* Shift it up to bits 27:24. */
1094 brw_SHL(p, m0_2, get_element_ud(dst, 2), brw_imm_ud(ivb ? 12 : 11));
1095
1096 /* Set the Barrier Count and the enable bit */
1097 brw_OR(p, m0_2, m0_2, brw_imm_ud(instances << 9 | (1 << 15)));
1098
1099 brw_pop_insn_state(p);
1100 }
1101
1102 static void
1103 generate_oword_dual_block_offsets(struct brw_codegen *p,
1104 struct brw_reg m1,
1105 struct brw_reg index)
1106 {
1107 int second_vertex_offset;
1108
1109 if (p->devinfo->gen >= 6)
1110 second_vertex_offset = 1;
1111 else
1112 second_vertex_offset = 16;
1113
1114 m1 = retype(m1, BRW_REGISTER_TYPE_D);
1115
1116 /* Set up M1 (message payload). Only the block offsets in M1.0 and
1117 * M1.4 are used, and the rest are ignored.
1118 */
1119 struct brw_reg m1_0 = suboffset(vec1(m1), 0);
1120 struct brw_reg m1_4 = suboffset(vec1(m1), 4);
1121 struct brw_reg index_0 = suboffset(vec1(index), 0);
1122 struct brw_reg index_4 = suboffset(vec1(index), 4);
1123
1124 brw_push_insn_state(p);
1125 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1126 brw_set_default_access_mode(p, BRW_ALIGN_1);
1127
1128 brw_MOV(p, m1_0, index_0);
1129
1130 if (index.file == BRW_IMMEDIATE_VALUE) {
1131 index_4.ud += second_vertex_offset;
1132 brw_MOV(p, m1_4, index_4);
1133 } else {
1134 brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
1135 }
1136
1137 brw_pop_insn_state(p);
1138 }
1139
1140 static void
1141 generate_unpack_flags(struct brw_codegen *p,
1142 struct brw_reg dst)
1143 {
1144 brw_push_insn_state(p);
1145 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1146 brw_set_default_access_mode(p, BRW_ALIGN_1);
1147
1148 struct brw_reg flags = brw_flag_reg(0, 0);
1149 struct brw_reg dst_0 = suboffset(vec1(dst), 0);
1150 struct brw_reg dst_4 = suboffset(vec1(dst), 4);
1151
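/* Split f0.0 into per-vertex nibbles: bits 3:0 land in dst.0 and bits 7:4,
 * shifted down, land in dst.4.
 */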
1152 brw_AND(p, dst_0, flags, brw_imm_ud(0x0f));
1153 brw_AND(p, dst_4, flags, brw_imm_ud(0xf0));
1154 brw_SHR(p, dst_4, dst_4, brw_imm_ud(4));
1155
1156 brw_pop_insn_state(p);
1157 }
1158
1159 static void
1160 generate_scratch_read(struct brw_codegen *p,
1161 vec4_instruction *inst,
1162 struct brw_reg dst,
1163 struct brw_reg index)
1164 {
1165 const struct gen_device_info *devinfo = p->devinfo;
1166 struct brw_reg header = brw_vec8_grf(0, 0);
1167
1168 gen6_resolve_implied_move(p, &header, inst->base_mrf);
1169
1170 generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
1171 index);
1172
1173 uint32_t msg_type;
1174
1175 if (devinfo->gen >= 6)
1176 msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1177 else if (devinfo->gen == 5 || devinfo->is_g4x)
1178 msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1179 else
1180 msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1181
1182 const unsigned target_cache =
1183 devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1184 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1185 BRW_SFID_DATAPORT_READ;
1186
1187 /* Each of the 8 channel enables is considered for whether each
1188 * dword is written.
1189 */
1190 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1191 brw_inst_set_sfid(devinfo, send, target_cache);
1192 brw_set_dest(p, send, dst);
1193 brw_set_src0(p, send, header);
1194 if (devinfo->gen < 6)
1195 brw_inst_set_cond_modifier(devinfo, send, inst->base_mrf);
1196 brw_set_desc(p, send,
1197 brw_message_desc(devinfo, 2, 1, true) |
1198 brw_dp_read_desc(devinfo,
1199 brw_scratch_surface_idx(p),
1200 BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
1201 msg_type, BRW_DATAPORT_READ_TARGET_RENDER_CACHE));
1202 }
1203
1204 static void
1205 generate_scratch_write(struct brw_codegen *p,
1206 vec4_instruction *inst,
1207 struct brw_reg dst,
1208 struct brw_reg src,
1209 struct brw_reg index)
1210 {
1211 const struct gen_device_info *devinfo = p->devinfo;
1212 const unsigned target_cache =
1213 (devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
1214 devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
1215 BRW_SFID_DATAPORT_WRITE);
1216 struct brw_reg header = brw_vec8_grf(0, 0);
1217 bool write_commit;
1218
1219 /* If the instruction is predicated, we'll predicate the send, not
1220 * the header setup.
1221 */
1222 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
1223
1224 gen6_resolve_implied_move(p, &header, inst->base_mrf);
1225
1226 generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
1227 index);
1228
1229 brw_MOV(p,
1230 retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
1231 retype(src, BRW_REGISTER_TYPE_D));
1232
1233 uint32_t msg_type;
1234
1235 if (devinfo->gen >= 7)
1236 msg_type = GEN7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE;
1237 else if (devinfo->gen == 6)
1238 msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
1239 else
1240 msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
1241
1242 brw_set_default_predicate_control(p, inst->predicate);
1243
1244 /* Pre-gen6, we have to specify write commits to ensure ordering
1245 * between reads and writes within a thread. Afterwards, that's
1246 * guaranteed and write commits only matter for inter-thread
1247 * synchronization.
1248 */
1249 if (devinfo->gen >= 6) {
1250 write_commit = false;
1251 } else {
1252 /* The visitor set up our destination register to be g0. This
1253 * means that when the next read comes along, we will end up
1254 * reading from g0 and causing a block on the write commit. For
1255 * write-after-read, we are relying on the value of the previous
1256 * read being used (and thus blocking on completion) before our
1257 * write is executed. This means we have to be careful in
1258 * instruction scheduling to not violate this assumption.
1259 */
1260 write_commit = true;
1261 }
1262
1263 /* Each of the 8 channel enables is considered for whether each
1264 * dword is written.
1265 */
1266 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1267 brw_inst_set_sfid(p->devinfo, send, target_cache);
1268 brw_set_dest(p, send, dst);
1269 brw_set_src0(p, send, header);
1270 if (devinfo->gen < 6)
1271 brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
1272 brw_set_desc(p, send,
1273 brw_message_desc(devinfo, 3, write_commit, true) |
1274 brw_dp_write_desc(devinfo,
1275 brw_scratch_surface_idx(p),
1276 BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
1277 msg_type,
1278 false, /* not a render target write */
1279 write_commit));
1280 }
1281
1282 static void
1283 generate_pull_constant_load(struct brw_codegen *p,
1284 struct brw_vue_prog_data *prog_data,
1285 vec4_instruction *inst,
1286 struct brw_reg dst,
1287 struct brw_reg index,
1288 struct brw_reg offset)
1289 {
1290 const struct gen_device_info *devinfo = p->devinfo;
1291 const unsigned target_cache =
1292 (devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_SAMPLER_CACHE :
1293 BRW_SFID_DATAPORT_READ);
1294 assert(index.file == BRW_IMMEDIATE_VALUE &&
1295 index.type == BRW_REGISTER_TYPE_UD);
1296 uint32_t surf_index = index.ud;
1297
1298 struct brw_reg header = brw_vec8_grf(0, 0);
1299
1300 gen6_resolve_implied_move(p, &header, inst->base_mrf);
1301
1302 if (devinfo->gen >= 6) {
1303 if (offset.file == BRW_IMMEDIATE_VALUE) {
1304 brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
1305 BRW_REGISTER_TYPE_D),
1306 brw_imm_d(offset.ud >> 4));
1307 } else {
1308 brw_SHR(p, retype(brw_message_reg(inst->base_mrf + 1),
1309 BRW_REGISTER_TYPE_D),
1310 offset, brw_imm_d(4));
1311 }
1312 } else {
1313 brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
1314 BRW_REGISTER_TYPE_D),
1315 offset);
1316 }
1317
1318 uint32_t msg_type;
1319
1320 if (devinfo->gen >= 6)
1321 msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1322 else if (devinfo->gen == 5 || devinfo->is_g4x)
1323 msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1324 else
1325 msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
1326
1327 /* Each of the 8 channel enables is considered for whether each
1328 * dword is written.
1329 */
1330 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1331 brw_inst_set_sfid(devinfo, send, target_cache);
1332 brw_set_dest(p, send, dst);
1333 brw_set_src0(p, send, header);
1334 if (devinfo->gen < 6)
1335 brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
1336 brw_set_desc(p, send,
1337 brw_message_desc(devinfo, 2, 1, true) |
1338 brw_dp_read_desc(devinfo, surf_index,
1339 BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
1340 msg_type,
1341 BRW_DATAPORT_READ_TARGET_DATA_CACHE));
1342 }
1343
1344 static void
1345 generate_get_buffer_size(struct brw_codegen *p,
1346 struct brw_vue_prog_data *prog_data,
1347 vec4_instruction *inst,
1348 struct brw_reg dst,
1349 struct brw_reg src,
1350 struct brw_reg surf_index)
1351 {
1352 assert(p->devinfo->gen >= 7);
1353 assert(surf_index.type == BRW_REGISTER_TYPE_UD &&
1354 surf_index.file == BRW_IMMEDIATE_VALUE);
1355
1356 brw_SAMPLE(p,
1357 dst,
1358 inst->base_mrf,
1359 src,
1360 surf_index.ud,
1361 0,
1362 GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
1363 1, /* response length */
1364 inst->mlen,
1365 inst->header_size > 0,
1366 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
1367 BRW_SAMPLER_RETURN_FORMAT_SINT32);
1368 }
1369
1370 static void
1371 generate_pull_constant_load_gen7(struct brw_codegen *p,
1372 struct brw_vue_prog_data *prog_data,
1373 vec4_instruction *inst,
1374 struct brw_reg dst,
1375 struct brw_reg surf_index,
1376 struct brw_reg offset)
1377 {
1378 const struct gen_device_info *devinfo = p->devinfo;
1379 assert(surf_index.type == BRW_REGISTER_TYPE_UD);
1380
1381 if (surf_index.file == BRW_IMMEDIATE_VALUE) {
1382
1383 brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);
1384 brw_inst_set_sfid(devinfo, insn, BRW_SFID_SAMPLER);
1385 brw_set_dest(p, insn, dst);
1386 brw_set_src0(p, insn, offset);
1387 brw_set_desc(p, insn,
1388 brw_message_desc(devinfo, inst->mlen, 1, inst->header_size) |
1389 brw_sampler_desc(devinfo, surf_index.ud,
1390 0, /* LD message ignores sampler unit */
1391 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
1392 BRW_SAMPLER_SIMD_MODE_SIMD4X2, 0));
1393 } else {
1394
1395 struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
1396
1397 brw_push_insn_state(p);
1398 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1399 brw_set_default_access_mode(p, BRW_ALIGN_1);
1400
1401 /* a0.0 = surf_index & 0xff */
1402 brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
1403 brw_inst_set_exec_size(devinfo, insn_and, BRW_EXECUTE_1);
1404 brw_set_dest(p, insn_and, addr);
1405 brw_set_src0(p, insn_and, vec1(retype(surf_index, BRW_REGISTER_TYPE_UD)));
1406 brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));
1407
1408 brw_pop_insn_state(p);
1409
1410 /* dst = send(offset, a0.0 | <descriptor>) */
1411 brw_send_indirect_message(
1412 p, BRW_SFID_SAMPLER, dst, offset, addr,
1413 brw_message_desc(devinfo, inst->mlen, 1, inst->header_size) |
1414 brw_sampler_desc(devinfo,
1415 0 /* surface */,
1416 0 /* sampler */,
1417 GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
1418 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
1419 0),
1420 false /* EOT */);
1421 }
1422 }
1423
1424 static void
1425 generate_set_simd4x2_header_gen9(struct brw_codegen *p,
1426 vec4_instruction *,
1427 struct brw_reg dst)
1428 {
1429 brw_push_insn_state(p);
1430 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1431
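/* Copy g0 into the header, then set the SKL+ extension bit in DWord 2 so
 * the sampler treats the message as SIMD4x2.
 */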
1432 brw_set_default_exec_size(p, BRW_EXECUTE_8);
1433 brw_MOV(p, vec8(dst), retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
1434
1435 brw_set_default_access_mode(p, BRW_ALIGN_1);
1436 brw_MOV(p, get_element_ud(dst, 2),
1437 brw_imm_ud(GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2));
1438
1439 brw_pop_insn_state(p);
1440 }
1441
1442 static void
1443 generate_mov_indirect(struct brw_codegen *p,
1444 vec4_instruction *,
1445 struct brw_reg dst, struct brw_reg reg,
1446 struct brw_reg indirect)
1447 {
1448 assert(indirect.type == BRW_REGISTER_TYPE_UD);
1449 assert(p->devinfo->gen >= 6);
1450
1451 unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr * (REG_SIZE / 2);
1452
1453 /* This instruction acts in align1 mode */
1454 assert(dst.writemask == WRITEMASK_XYZW);
1455
1456 if (indirect.file == BRW_IMMEDIATE_VALUE) {
1457 imm_byte_offset += indirect.ud;
1458
1459 reg.nr = imm_byte_offset / REG_SIZE;
1460 reg.subnr = (imm_byte_offset / (REG_SIZE / 2)) % 2;
1461 unsigned shift = (imm_byte_offset / 4) % 4;
1462 reg.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);
1463
1464 brw_MOV(p, dst, reg);
1465 } else {
1466 brw_push_insn_state(p);
1467 brw_set_default_access_mode(p, BRW_ALIGN_1);
1468 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1469
1470 struct brw_reg addr = vec8(brw_address_reg(0));
1471
1472 /* We need to move the indirect value into the address register. In
1473 * order to make things make some sense, we want to respect at least the
1474 * X component of the swizzle. In order to do that, we need to convert
1475 * the subnr (probably 0) to an align1 subnr and add in the swizzle.
1476 */
1477 assert(brw_is_single_value_swizzle(indirect.swizzle));
1478 indirect.subnr = (indirect.subnr * 4 + BRW_GET_SWZ(indirect.swizzle, 0));
1479
1480 /* We then use a region of <8,4,0>:uw to pick off the first 2 bytes of
1481 * the indirect and splat it out to all four channels of the given half
1482 * of a0.
1483 */
1484 indirect.subnr *= 2;
1485 indirect = stride(retype(indirect, BRW_REGISTER_TYPE_UW), 8, 4, 0);
1486 brw_ADD(p, addr, indirect, brw_imm_uw(imm_byte_offset));
1487
1488 /* Now we need to incorporate the swizzle from the source register */
1489 if (reg.swizzle != BRW_SWIZZLE_XXXX) {
1490 uint32_t uv_swiz = BRW_GET_SWZ(reg.swizzle, 0) << 2 |
1491 BRW_GET_SWZ(reg.swizzle, 1) << 6 |
1492 BRW_GET_SWZ(reg.swizzle, 2) << 10 |
1493 BRW_GET_SWZ(reg.swizzle, 3) << 14;
1494 uv_swiz |= uv_swiz << 16;
1495
1496 brw_ADD(p, addr, addr, brw_imm_uv(uv_swiz));
1497 }
1498
1499 brw_MOV(p, dst, retype(brw_VxH_indirect(0, 0), reg.type));
1500
1501 brw_pop_insn_state(p);
1502 }
1503 }
1504
1505 static void
1506 generate_code(struct brw_codegen *p,
1507 const struct brw_compiler *compiler,
1508 void *log_data,
1509 const nir_shader *nir,
1510 struct brw_vue_prog_data *prog_data,
1511 const struct cfg_t *cfg,
1512 const performance &perf,
1513 struct brw_compile_stats *stats)
1514 {
1515 const struct gen_device_info *devinfo = p->devinfo;
1516 const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->info.stage);
1517 bool debug_flag = INTEL_DEBUG &
1518 intel_debug_flag_for_shader_stage(nir->info.stage);
1519 struct disasm_info *disasm_info = disasm_initialize(devinfo, cfg);
1520
1521 /* `send_count` explicitly does not include spills or fills, as we'd
1522 * like to use it as a metric for intentional memory access or other
1523 * shared function use. Otherwise, subtle changes to scheduling or
1524 * register allocation could cause it to fluctuate wildly - and that
1525 * effect is already counted in spill/fill counts.
1526 */
1527 int spill_count = 0, fill_count = 0;
1528 int loop_count = 0, send_count = 0;
1529
1530 foreach_block_and_inst (block, vec4_instruction, inst, cfg) {
1531 struct brw_reg src[3], dst;
1532
1533 if (unlikely(debug_flag))
1534 disasm_annotate(disasm_info, inst, p->next_insn_offset);
1535
1536 for (unsigned int i = 0; i < 3; i++) {
1537 src[i] = inst->src[i].as_brw_reg();
1538 }
1539 dst = inst->dst.as_brw_reg();
1540
1541 brw_set_default_predicate_control(p, inst->predicate);
1542 brw_set_default_predicate_inverse(p, inst->predicate_inverse);
1543 brw_set_default_flag_reg(p, inst->flag_subreg / 2, inst->flag_subreg % 2);
1544 brw_set_default_saturate(p, inst->saturate);
1545 brw_set_default_mask_control(p, inst->force_writemask_all);
1546 brw_set_default_acc_write_control(p, inst->writes_accumulator);
1547
1548 assert(inst->group % inst->exec_size == 0);
1549 assert(inst->group % 4 == 0);
1550
1551 /* There are some instructions where the destination is 64-bit
1552 * but we retype it to a smaller type. In that case, we cannot
1553 * double the exec_size.
1554 */
1555 const bool is_df = (get_exec_type_size(inst) == 8 ||
1556 inst->dst.type == BRW_REGISTER_TYPE_DF) &&
1557 inst->opcode != VEC4_OPCODE_PICK_LOW_32BIT &&
1558 inst->opcode != VEC4_OPCODE_PICK_HIGH_32BIT &&
1559 inst->opcode != VEC4_OPCODE_SET_LOW_32BIT &&
1560 inst->opcode != VEC4_OPCODE_SET_HIGH_32BIT;
1561
1562 unsigned exec_size = inst->exec_size;
1563 if (devinfo->gen == 7 && !devinfo->is_haswell && is_df)
1564 exec_size *= 2;
1565
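/* cvt() maps a power-of-two channel count to 1..6, so cvt(exec_size) - 1
 * yields the BRW_EXECUTE_* encoding.
 */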
1566 brw_set_default_exec_size(p, cvt(exec_size) - 1);
1567
1568 if (!inst->force_writemask_all)
1569 brw_set_default_group(p, inst->group);
1570
1571 assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
1572 assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
1573
1574 unsigned pre_emit_nr_insn = p->nr_insn;
1575
1576 switch (inst->opcode) {
1577 case VEC4_OPCODE_UNPACK_UNIFORM:
1578 case BRW_OPCODE_MOV:
1579 brw_MOV(p, dst, src[0]);
1580 break;
1581 case BRW_OPCODE_ADD:
1582 brw_ADD(p, dst, src[0], src[1]);
1583 break;
1584 case BRW_OPCODE_MUL:
1585 brw_MUL(p, dst, src[0], src[1]);
1586 break;
1587 case BRW_OPCODE_MACH:
1588 brw_MACH(p, dst, src[0], src[1]);
1589 break;
1590
1591 case BRW_OPCODE_MAD:
1592 assert(devinfo->gen >= 6);
1593 brw_MAD(p, dst, src[0], src[1], src[2]);
1594 break;
1595
1596 case BRW_OPCODE_FRC:
1597 brw_FRC(p, dst, src[0]);
1598 break;
1599 case BRW_OPCODE_RNDD:
1600 brw_RNDD(p, dst, src[0]);
1601 break;
1602 case BRW_OPCODE_RNDE:
1603 brw_RNDE(p, dst, src[0]);
1604 break;
1605 case BRW_OPCODE_RNDZ:
1606 brw_RNDZ(p, dst, src[0]);
1607 break;
1608
1609 case BRW_OPCODE_AND:
1610 brw_AND(p, dst, src[0], src[1]);
1611 break;
1612 case BRW_OPCODE_OR:
1613 brw_OR(p, dst, src[0], src[1]);
1614 break;
1615 case BRW_OPCODE_XOR:
1616 brw_XOR(p, dst, src[0], src[1]);
1617 break;
1618 case BRW_OPCODE_NOT:
1619 brw_NOT(p, dst, src[0]);
1620 break;
1621 case BRW_OPCODE_ASR:
1622 brw_ASR(p, dst, src[0], src[1]);
1623 break;
1624 case BRW_OPCODE_SHR:
1625 brw_SHR(p, dst, src[0], src[1]);
1626 break;
1627 case BRW_OPCODE_SHL:
1628 brw_SHL(p, dst, src[0], src[1]);
1629 break;
1630
1631 case BRW_OPCODE_CMP:
1632 brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
1633 break;
1634 case BRW_OPCODE_SEL:
1635 brw_SEL(p, dst, src[0], src[1]);
1636 break;
1637
1638 case BRW_OPCODE_DPH:
1639 brw_DPH(p, dst, src[0], src[1]);
1640 break;
1641
1642 case BRW_OPCODE_DP4:
1643 brw_DP4(p, dst, src[0], src[1]);
1644 break;
1645
1646 case BRW_OPCODE_DP3:
1647 brw_DP3(p, dst, src[0], src[1]);
1648 break;
1649
1650 case BRW_OPCODE_DP2:
1651 brw_DP2(p, dst, src[0], src[1]);
1652 break;
1653
1654 case BRW_OPCODE_F32TO16:
1655 assert(devinfo->gen >= 7);
1656 brw_F32TO16(p, dst, src[0]);
1657 break;
1658
1659 case BRW_OPCODE_F16TO32:
1660 assert(devinfo->gen >= 7);
1661 brw_F16TO32(p, dst, src[0]);
1662 break;
1663
1664 case BRW_OPCODE_LRP:
1665 assert(devinfo->gen >= 6);
1666 brw_LRP(p, dst, src[0], src[1], src[2]);
1667 break;
1668
1669 case BRW_OPCODE_BFREV:
1670 assert(devinfo->gen >= 7);
1671 brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
1672 retype(src[0], BRW_REGISTER_TYPE_UD));
1673 break;
1674 case BRW_OPCODE_FBH:
1675 assert(devinfo->gen >= 7);
1676 brw_FBH(p, retype(dst, src[0].type), src[0]);
1677 break;
1678 case BRW_OPCODE_FBL:
1679 assert(devinfo->gen >= 7);
1680 brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD),
1681 retype(src[0], BRW_REGISTER_TYPE_UD));
1682 break;
1683 case BRW_OPCODE_LZD:
1684 brw_LZD(p, dst, src[0]);
1685 break;
1686 case BRW_OPCODE_CBIT:
1687 assert(devinfo->gen >= 7);
1688 brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD),
1689 retype(src[0], BRW_REGISTER_TYPE_UD));
1690 break;
1691 case BRW_OPCODE_ADDC:
1692 assert(devinfo->gen >= 7);
1693 brw_ADDC(p, dst, src[0], src[1]);
1694 break;
1695 case BRW_OPCODE_SUBB:
1696 assert(devinfo->gen >= 7);
1697 brw_SUBB(p, dst, src[0], src[1]);
1698 break;
1699 case BRW_OPCODE_MAC:
1700 brw_MAC(p, dst, src[0], src[1]);
1701 break;
1702
1703 case BRW_OPCODE_BFE:
1704 assert(devinfo->gen >= 7);
1705 brw_BFE(p, dst, src[0], src[1], src[2]);
1706 break;
1707
1708 case BRW_OPCODE_BFI1:
1709 assert(devinfo->gen >= 7);
1710 brw_BFI1(p, dst, src[0], src[1]);
1711 break;
1712 case BRW_OPCODE_BFI2:
1713 assert(devinfo->gen >= 7);
1714 brw_BFI2(p, dst, src[0], src[1], src[2]);
1715 break;
1716
1717 case BRW_OPCODE_IF:
1718 if (!inst->src[0].is_null()) {
1719 /* The instruction has an embedded compare (only allowed on gen6) */
1720 assert(devinfo->gen == 6);
1721 gen6_IF(p, inst->conditional_mod, src[0], src[1]);
1722 } else {
1723 brw_inst *if_inst = brw_IF(p, BRW_EXECUTE_8);
1724 brw_inst_set_pred_control(p->devinfo, if_inst, inst->predicate);
1725 }
1726 break;
1727
1728 case BRW_OPCODE_ELSE:
1729 brw_ELSE(p);
1730 break;
1731 case BRW_OPCODE_ENDIF:
1732 brw_ENDIF(p);
1733 break;
1734
1735 case BRW_OPCODE_DO:
1736 brw_DO(p, BRW_EXECUTE_8);
1737 break;
1738
1739 case BRW_OPCODE_BREAK:
1740 brw_BREAK(p);
1741 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
1742 break;
1743 case BRW_OPCODE_CONTINUE:
1744 brw_CONT(p);
1745 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
1746 break;
1747
1748 case BRW_OPCODE_WHILE:
1749 brw_WHILE(p);
1750 loop_count++;
1751 break;
1752
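/* Single-source math: Gen7+ can issue the MATH instruction directly, Gen6
 * needs the Align1 path in generate_math_gen6(), and Gen4/5 go through a
 * message to the shared math unit, which counts as a send.
 */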
1753 case SHADER_OPCODE_RCP:
1754 case SHADER_OPCODE_RSQ:
1755 case SHADER_OPCODE_SQRT:
1756 case SHADER_OPCODE_EXP2:
1757 case SHADER_OPCODE_LOG2:
1758 case SHADER_OPCODE_SIN:
1759 case SHADER_OPCODE_COS:
1760 assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
1761 if (devinfo->gen >= 7) {
1762 gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
1763 brw_null_reg());
1764 } else if (devinfo->gen == 6) {
1765 generate_math_gen6(p, inst, dst, src[0], brw_null_reg());
1766 } else {
1767 generate_math1_gen4(p, inst, dst, src[0]);
1768 send_count++;
1769 }
1770 break;
1771
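/* Two-source math follows the same split; on Gen4/5 one operand is first
 * copied into the MRF payload by generate_math2_gen4() before the math send.
 */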
1772 case SHADER_OPCODE_POW:
1773 case SHADER_OPCODE_INT_QUOTIENT:
1774 case SHADER_OPCODE_INT_REMAINDER:
1775 assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
1776 if (devinfo->gen >= 7) {
1777 gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
1778 } else if (devinfo->gen == 6) {
1779 generate_math_gen6(p, inst, dst, src[0], src[1]);
1780 } else {
1781 generate_math2_gen4(p, inst, dst, src[0], src[1]);
1782 send_count++;
1783 }
1784 break;
1785
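/* All sampling, resinfo and sampleinfo style operations share the sampler
 * message path in generate_tex().
 */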
1786 case SHADER_OPCODE_TEX:
1787 case SHADER_OPCODE_TXD:
1788 case SHADER_OPCODE_TXF:
1789 case SHADER_OPCODE_TXF_CMS:
1790 case SHADER_OPCODE_TXF_CMS_W:
1791 case SHADER_OPCODE_TXF_MCS:
1792 case SHADER_OPCODE_TXL:
1793 case SHADER_OPCODE_TXS:
1794 case SHADER_OPCODE_TG4:
1795 case SHADER_OPCODE_TG4_OFFSET:
1796 case SHADER_OPCODE_SAMPLEINFO:
1797 generate_tex(p, prog_data, nir->info.stage,
1798 inst, dst, src[0], src[1], src[2]);
1799 send_count++;
1800 break;
1801
1802 case SHADER_OPCODE_GET_BUFFER_SIZE:
1803 generate_get_buffer_size(p, prog_data, inst, dst, src[0], src[1]);
1804 send_count++;
1805 break;
1806
1807 case VS_OPCODE_URB_WRITE:
1808 generate_vs_urb_write(p, inst);
1809 send_count++;
1810 break;
1811
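/* Scratch reads and writes are the register fill/spill accesses; they are
 * counted separately so the shader statistics can report spills:fills.
 */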
1812 case SHADER_OPCODE_GEN4_SCRATCH_READ:
1813 generate_scratch_read(p, inst, dst, src[0]);
1814 fill_count++;
1815 break;
1816
1817 case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
1818 generate_scratch_write(p, inst, dst, src[0], src[1]);
1819 spill_count++;
1820 break;
1821
1822 case VS_OPCODE_PULL_CONSTANT_LOAD:
1823 generate_pull_constant_load(p, prog_data, inst, dst, src[0], src[1]);
1824 send_count++;
1825 break;
1826
1827 case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
1828 generate_pull_constant_load_gen7(p, prog_data, inst, dst, src[0], src[1]);
1829 send_count++;
1830 break;
1831
1832 case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
1833 generate_set_simd4x2_header_gen9(p, inst, dst);
1834 break;
1835
1836 case GS_OPCODE_URB_WRITE:
1837 generate_gs_urb_write(p, inst);
1838 send_count++;
1839 break;
1840
1841 case GS_OPCODE_URB_WRITE_ALLOCATE:
1842 generate_gs_urb_write_allocate(p, inst);
1843 send_count++;
1844 break;
1845
1846 case GS_OPCODE_SVB_WRITE:
1847 generate_gs_svb_write(p, prog_data, inst, dst, src[0], src[1]);
1848 send_count++;
1849 break;
1850
1851 case GS_OPCODE_SVB_SET_DST_INDEX:
1852 generate_gs_svb_set_destination_index(p, inst, dst, src[0]);
1853 break;
1854
1855 case GS_OPCODE_THREAD_END:
1856 generate_gs_thread_end(p, inst);
1857 send_count++;
1858 break;
1859
1860 case GS_OPCODE_SET_WRITE_OFFSET:
1861 generate_gs_set_write_offset(p, dst, src[0], src[1]);
1862 break;
1863
1864 case GS_OPCODE_SET_VERTEX_COUNT:
1865 generate_gs_set_vertex_count(p, dst, src[0]);
1866 break;
1867
1868 case GS_OPCODE_FF_SYNC:
1869 generate_gs_ff_sync(p, inst, dst, src[0], src[1]);
1870 send_count++;
1871 break;
1872
1873 case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
1874 generate_gs_ff_sync_set_primitives(p, dst, src[0], src[1], src[2]);
1875 break;
1876
1877 case GS_OPCODE_SET_PRIMITIVE_ID:
1878 generate_gs_set_primitive_id(p, dst);
1879 break;
1880
1881 case GS_OPCODE_SET_DWORD_2:
1882 generate_gs_set_dword_2(p, dst, src[0]);
1883 break;
1884
1885 case GS_OPCODE_PREPARE_CHANNEL_MASKS:
1886 generate_gs_prepare_channel_masks(p, dst);
1887 break;
1888
1889 case GS_OPCODE_SET_CHANNEL_MASKS:
1890 generate_gs_set_channel_masks(p, dst, src[0]);
1891 break;
1892
1893 case GS_OPCODE_GET_INSTANCE_ID:
1894 generate_gs_get_instance_id(p, dst);
1895 break;
1896
1897 case SHADER_OPCODE_SHADER_TIME_ADD:
1898 brw_shader_time_add(p, src[0],
1899 prog_data->base.binding_table.shader_time_start);
1900 send_count++;
1901 break;
1902
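/* Untyped data-port messages: src[0] is the payload, src[1] the surface, and
 * the immediate src[2] carries the atomic opcode (for atomics) or the channel
 * count (for surface reads and writes).
 */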
1903 case VEC4_OPCODE_UNTYPED_ATOMIC:
1904 assert(src[2].file == BRW_IMMEDIATE_VALUE);
1905 brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
1906 !inst->dst.is_null(), inst->header_size);
1907 send_count++;
1908 break;
1909
1910 case VEC4_OPCODE_UNTYPED_SURFACE_READ:
1911 assert(!inst->header_size);
1912 assert(src[2].file == BRW_IMMEDIATE_VALUE);
1913 brw_untyped_surface_read(p, dst, src[0], src[1], inst->mlen,
1914 src[2].ud);
1915 send_count++;
1916 break;
1917
1918 case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
1919 assert(src[2].file == BRW_IMMEDIATE_VALUE);
1920 brw_untyped_surface_write(p, src[0], src[1], inst->mlen,
1921 src[2].ud, inst->header_size);
1922 send_count++;
1923 break;
1924
1925 case SHADER_OPCODE_MEMORY_FENCE:
1926 brw_memory_fence(p, dst, src[0], BRW_OPCODE_SEND,
1927 brw_message_target(inst->sfid),
1928 /* commit_enable */ false,
1929 /* bti */ 0);
1930 send_count++;
1931 break;
1932
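/* If the stage is known to dispatch with all channels enabled, an all-ones
 * mask can be used instead of reading the dispatch mask register.
 */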
1933 case SHADER_OPCODE_FIND_LIVE_CHANNEL: {
1934 const struct brw_reg mask =
1935 brw_stage_has_packed_dispatch(devinfo, nir->info.stage,
1936 &prog_data->base) ? brw_imm_ud(~0u) :
1937 brw_dmask_reg();
1938 brw_find_live_channel(p, dst, mask);
1939 break;
1940 }
1941
1942 case SHADER_OPCODE_BROADCAST:
1943 assert(inst->force_writemask_all);
1944 brw_broadcast(p, dst, src[0], src[1]);
1945 break;
1946
1947 case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
1948 generate_unpack_flags(p, dst);
1949 break;
1950
1951 case VEC4_OPCODE_MOV_BYTES: {
1952 /* Moves the low byte from each channel, using an Align1 access mode
1953 * and a <4,1,0> source region.
1954 */
1955 assert(src[0].type == BRW_REGISTER_TYPE_UB ||
1956 src[0].type == BRW_REGISTER_TYPE_B);
1957
1958 brw_set_default_access_mode(p, BRW_ALIGN_1);
1959 src[0].vstride = BRW_VERTICAL_STRIDE_4;
1960 src[0].width = BRW_WIDTH_1;
1961 src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
1962 brw_MOV(p, dst, src[0]);
1963 brw_set_default_access_mode(p, BRW_ALIGN_16);
1964 break;
1965 }
1966
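/* DF-source conversions: the destination is still typed as a 64-bit register
 * here (hence the size-8 assert) and is retyped to the 32-bit result type
 * below.
 */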
1967 case VEC4_OPCODE_DOUBLE_TO_F32:
1968 case VEC4_OPCODE_DOUBLE_TO_D32:
1969 case VEC4_OPCODE_DOUBLE_TO_U32: {
1970 assert(type_sz(src[0].type) == 8);
1971 assert(type_sz(dst.type) == 8);
1972
1973 brw_reg_type dst_type;
1974
1975 switch (inst->opcode) {
1976 case VEC4_OPCODE_DOUBLE_TO_F32:
1977 dst_type = BRW_REGISTER_TYPE_F;
1978 break;
1979 case VEC4_OPCODE_DOUBLE_TO_D32:
1980 dst_type = BRW_REGISTER_TYPE_D;
1981 break;
1982 case VEC4_OPCODE_DOUBLE_TO_U32:
1983 dst_type = BRW_REGISTER_TYPE_UD;
1984 break;
1985 default:
1986 unreachable("Unsupported conversion");
1987 }
1988 dst = retype(dst, dst_type);
1989
1990 brw_set_default_access_mode(p, BRW_ALIGN_1);
1991
1992 /* When converting from DF->F, we set the destination's stride to 2 as an
1993 * alignment requirement. But on IVB/BYT each DF conversion implicitly
1994 * writes two floats, the first of which is the converted value, so there
1995 * we only need stride 1 instead of 2.
1996 */
1997 struct brw_reg spread_dst;
1998 if (devinfo->gen == 7 && !devinfo->is_haswell)
1999 spread_dst = stride(dst, 8, 4, 1);
2000 else
2001 spread_dst = stride(dst, 8, 4, 2);
2002
2003 brw_MOV(p, spread_dst, src[0]);
2004
2005 brw_set_default_access_mode(p, BRW_ALIGN_16);
2006 break;
2007 }
2008
2009 case VEC4_OPCODE_TO_DOUBLE: {
2010 assert(type_sz(src[0].type) == 4);
2011 assert(type_sz(dst.type) == 8);
2012
2013 brw_set_default_access_mode(p, BRW_ALIGN_1);
2014
2015 brw_MOV(p, dst, src[0]);
2016
2017 brw_set_default_access_mode(p, BRW_ALIGN_16);
2018 break;
2019 }
2020
2021 case VEC4_OPCODE_PICK_LOW_32BIT:
2022 case VEC4_OPCODE_PICK_HIGH_32BIT: {
2023 /* Stores the low/high 32 bits of each 64-bit element in src[0] into
2024 * dst using ALIGN1 mode and a <8,4,2>:UD region on the source.
2025 */
2026 assert(type_sz(src[0].type) == 8);
2027 assert(type_sz(dst.type) == 4);
2028
2029 brw_set_default_access_mode(p, BRW_ALIGN_1);
2030
2031 dst = retype(dst, BRW_REGISTER_TYPE_UD);
2032 dst.hstride = BRW_HORIZONTAL_STRIDE_1;
2033
2034 src[0] = retype(src[0], BRW_REGISTER_TYPE_UD);
2035 if (inst->opcode == VEC4_OPCODE_PICK_HIGH_32BIT)
2036 src[0] = suboffset(src[0], 1);
2037 src[0] = spread(src[0], 2);
2038 brw_MOV(p, dst, src[0]);
2039
2040 brw_set_default_access_mode(p, BRW_ALIGN_16);
2041 break;
2042 }
2043
2044 case VEC4_OPCODE_SET_LOW_32BIT:
2045 case VEC4_OPCODE_SET_HIGH_32BIT: {
2046 /* Reads consecutive 32-bit elements from src[0] and writes
2047 * them into the low/high 32 bits of each 64-bit element in dst.
2048 */
2049 assert(type_sz(src[0].type) == 4);
2050 assert(type_sz(dst.type) == 8);
2051
2052 brw_set_default_access_mode(p, BRW_ALIGN_1);
2053
2054 dst = retype(dst, BRW_REGISTER_TYPE_UD);
2055 if (inst->opcode == VEC4_OPCODE_SET_HIGH_32BIT)
2056 dst = suboffset(dst, 1);
2057 dst.hstride = BRW_HORIZONTAL_STRIDE_2;
2058
2059 src[0] = retype(src[0], BRW_REGISTER_TYPE_UD);
2060 brw_MOV(p, dst, src[0]);
2061
2062 brw_set_default_access_mode(p, BRW_ALIGN_16);
2063 break;
2064 }
2065
2066 case VEC4_OPCODE_PACK_BYTES: {
2067 /* Is effectively:
2068 *
2069 * mov(8) dst<16,4,1>:UB src<4,1,0>:UB
2070 *
2071 * but the only regioning available on a destination is the horizontal
2072 * stride, so instead we have to use two instructions:
2073 *
2074 * mov(4) dst<1>:UB src<4,1,0>:UB
2075 * mov(4) dst.16<1>:UB src.16<4,1,0>:UB
2076 *
2077 * which pack the four bytes from the low four DWords and the high four
2078 * DWords respectively.
2078 */
2079 assert(util_is_power_of_two_nonzero(dst.writemask));
2080 unsigned offset = __builtin_ctz(dst.writemask);
2081
2082 dst.type = BRW_REGISTER_TYPE_UB;
2083
2084 brw_set_default_access_mode(p, BRW_ALIGN_1);
2085
2086 src[0].type = BRW_REGISTER_TYPE_UB;
2087 src[0].vstride = BRW_VERTICAL_STRIDE_4;
2088 src[0].width = BRW_WIDTH_1;
2089 src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
2090 dst.subnr = offset * 4;
2091 struct brw_inst *insn = brw_MOV(p, dst, src[0]);
2092 brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
2093 brw_inst_set_no_dd_clear(p->devinfo, insn, true);
2094 brw_inst_set_no_dd_check(p->devinfo, insn, inst->no_dd_check);
2095
2096 src[0].subnr = 16;
2097 dst.subnr = 16 + offset * 4;
2098 insn = brw_MOV(p, dst, src[0]);
2099 brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
2100 brw_inst_set_no_dd_clear(p->devinfo, insn, inst->no_dd_clear);
2101 brw_inst_set_no_dd_check(p->devinfo, insn, true);
2102
2103 brw_set_default_access_mode(p, BRW_ALIGN_16);
2104 break;
2105 }
2106
2107 case TCS_OPCODE_URB_WRITE:
2108 generate_tcs_urb_write(p, inst, src[0]);
2109 send_count++;
2110 break;
2111
2112 case VEC4_OPCODE_URB_READ:
2113 generate_vec4_urb_read(p, inst, dst, src[0]);
2114 send_count++;
2115 break;
2116
2117 case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
2118 generate_tcs_input_urb_offsets(p, dst, src[0], src[1]);
2119 break;
2120
2121 case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
2122 generate_tcs_output_urb_offsets(p, dst, src[0], src[1]);
2123 break;
2124
2125 case TCS_OPCODE_GET_INSTANCE_ID:
2126 generate_tcs_get_instance_id(p, dst);
2127 break;
2128
2129 case TCS_OPCODE_GET_PRIMITIVE_ID:
2130 generate_tcs_get_primitive_id(p, dst);
2131 break;
2132
2133 case TCS_OPCODE_CREATE_BARRIER_HEADER:
2134 generate_tcs_create_barrier_header(p, prog_data, dst);
2135 break;
2136
2137 case TES_OPCODE_CREATE_INPUT_READ_HEADER:
2138 generate_tes_create_input_read_header(p, dst);
2139 break;
2140
2141 case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
2142 generate_tes_add_indirect_urb_offset(p, dst, src[0], src[1]);
2143 break;
2144
2145 case TES_OPCODE_GET_PRIMITIVE_ID:
2146 generate_tes_get_primitive_id(p, dst);
2147 break;
2148
2149 case TCS_OPCODE_SRC0_010_IS_ZERO:
2150 /* If src_reg had stride like fs_reg, we wouldn't need this. */
2151 brw_MOV(p, brw_null_reg(), stride(src[0], 0, 1, 0));
2152 break;
2153
2154 case TCS_OPCODE_RELEASE_INPUT:
2155 generate_tcs_release_input(p, dst, src[0], src[1]);
2156 send_count++;
2157 break;
2158
2159 case TCS_OPCODE_THREAD_END:
2160 generate_tcs_thread_end(p, inst);
2161 send_count++;
2162 break;
2163
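/* Emit the barrier message to the message gateway, then WAIT on the
 * notification register until the barrier is signalled.
 */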
2164 case SHADER_OPCODE_BARRIER:
2165 brw_barrier(p, src[0]);
2166 brw_WAIT(p);
2167 send_count++;
2168 break;
2169
2170 case SHADER_OPCODE_MOV_INDIRECT:
2171 generate_mov_indirect(p, inst, dst, src[0], src[1]);
2172 break;
2173
2174 case BRW_OPCODE_DIM:
2175 assert(devinfo->is_haswell);
2176 assert(src[0].type == BRW_REGISTER_TYPE_DF);
2177 assert(dst.type == BRW_REGISTER_TYPE_DF);
2178 brw_DIM(p, dst, retype(src[0], BRW_REGISTER_TYPE_F));
2179 break;
2180
2181 default:
2182 unreachable("Unsupported opcode");
2183 }
2184
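/* Each IR instruction above is expected to expand to exactly one EU
 * instruction, which is what allows conditional modifiers and dependency
 * control hints to be applied to the instruction that was just emitted.
 */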
2185 if (inst->opcode == VEC4_OPCODE_PACK_BYTES) {
2186 /* Dependency hints were already applied when these instructions were emitted. */
2187
2188 assert(!inst->conditional_mod);
2189 } else if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
2190 assert(p->nr_insn == pre_emit_nr_insn + 1 ||
2191 !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
2192 "emitting more than 1 instruction");
2193
2194 brw_inst *last = &p->store[pre_emit_nr_insn];
2195
2196 if (inst->conditional_mod)
2197 brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
2198 brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
2199 brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
2200 }
2201 }
2202
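/* Now that all instructions have been emitted, patch the UIP/JIP jump
 * offsets of the flow-control instructions.
 */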
2203 brw_set_uip_jip(p, 0);
2204
2205 /* end of program sentinel */
2206 disasm_new_inst_group(disasm_info, p->next_insn_offset);
2207
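/* Debug builds always validate the generated code and assert on failure;
 * release builds only validate when this stage's shader debug output is
 * enabled, for the benefit of the disassembly.
 */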
2208 #ifndef NDEBUG
2209 bool validated =
2210 #else
2211 if (unlikely(debug_flag))
2212 #endif
2213 brw_validate_instructions(devinfo, p->store,
2214 0, p->next_insn_offset,
2215 disasm_info);
2216
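/* Record the size before and after instruction compaction; compaction can
 * shrink eligible instructions from 16 to 8 bytes.
 */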
2217 int before_size = p->next_insn_offset;
2218 brw_compact_instructions(p, 0, disasm_info);
2219 int after_size = p->next_insn_offset;
2220
2221 if (unlikely(debug_flag)) {
2222 unsigned char sha1[21];
2223 char sha1buf[41];
2224
2225 _mesa_sha1_compute(p->store, p->next_insn_offset, sha1);
2226 _mesa_sha1_format(sha1buf, sha1);
2227
2228 fprintf(stderr, "Native code for %s %s shader %s (sha1 %s):\n",
2229 nir->info.label ? nir->info.label : "unnamed",
2230 _mesa_shader_stage_to_string(nir->info.stage), nir->info.name,
2231 sha1buf);
2232
2233 fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles. %d:%d "
2234 "spills:fills, %u sends. Compacted %d to %d bytes (%.0f%%)\n",
2235 stage_abbrev, before_size / 16, loop_count, perf.latency,
2236 spill_count, fill_count, send_count, before_size, after_size,
2237 100.0f * (before_size - after_size) / before_size);
2238
2239 /* overriding the shader makes disasm_info invalid */
2240 if (!brw_try_override_assembly(p, 0, sha1buf)) {
2241 dump_assembly(p->store, disasm_info, perf.block_latency);
2242 } else {
2243 fprintf(stderr, "Successfully overrode shader with sha1 %s\n\n", sha1buf);
2244 }
2245 }
2246 ralloc_free(disasm_info);
2247 assert(validated);
2248
2249 compiler->shader_debug_log(log_data,
2250 "%s vec4 shader: %d inst, %d loops, %u cycles, "
2251 "%d:%d spills:fills, %u sends, "
2252 "compacted %d to %d bytes.",
2253 stage_abbrev, before_size / 16,
2254 loop_count, perf.latency, spill_count,
2255 fill_count, send_count, before_size, after_size);
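/* The vec4 backend has no SIMD8/SIMD16 dispatch width, so zero is reported. */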
2256 if (stats) {
2257 stats->dispatch_width = 0;
2258 stats->instructions = before_size / 16;
2259 stats->sends = send_count;
2260 stats->loops = loop_count;
2261 stats->cycles = perf.latency;
2262 stats->spills = spill_count;
2263 stats->fills = fill_count;
2264 }
2265 }
2266
2267 extern "C" const unsigned *
2268 brw_vec4_generate_assembly(const struct brw_compiler *compiler,
2269 void *log_data,
2270 void *mem_ctx,
2271 const nir_shader *nir,
2272 struct brw_vue_prog_data *prog_data,
2273 const struct cfg_t *cfg,
2274 const performance &perf,
2275 struct brw_compile_stats *stats)
2276 {
2277 struct brw_codegen *p = rzalloc(mem_ctx, struct brw_codegen);
2278 brw_init_codegen(compiler->devinfo, p, mem_ctx);
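/* Vec4 code is generated in Align16 mode by default; individual opcodes
 * above switch to Align1 temporarily where required and restore it.
 */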
2279 brw_set_default_access_mode(p, BRW_ALIGN_16);
2280
2281 generate_code(p, compiler, log_data, nir, prog_data, cfg, perf, stats);
2282
2283 return brw_get_program(p, &prog_data->base.program_size);
2284 }