src/mesa/drivers/dri/i965/brw_vec4_generator.cpp
/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "glsl/glsl_parser_extras.h"
#include "brw_vec4.h"
#include "brw_cfg.h"
#include "brw_eu.h"
#include "brw_program.h"

using namespace brw;

static void
generate_math1_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src)
{
   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             src,
             BRW_MATH_PRECISION_FULL);
}

static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(!src.abs);
   assert(!src.negate);
   assert(src.swizzle == BRW_SWIZZLE_XYZW);
}

static void
generate_math_gen6(struct brw_codegen *p,
                   vec4_instruction *inst,
                   struct brw_reg dst,
                   struct brw_reg src0,
                   struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   if (src1.file == BRW_GENERAL_REGISTER_FILE)
      check_gen6_math_src_arg(src1);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   gen6_math(p, dst, brw_math_function(inst->opcode), src0, src1);
   brw_set_default_access_mode(p, BRW_ALIGN_16);
}

static void
generate_math2_gen4(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* From the Ironlake PRM, Volume 4, Part 1, Section 6.1.13
    * "Message Payload":
    *
    * "Operand0[7].  For the INT DIV functions, this operand is the
    *  denominator."
    *  ...
    * "Operand1[7].  For the INT DIV functions, this operand is the
    *  numerator."
    */
   bool is_int_div = inst->opcode != SHADER_OPCODE_POW;
   struct brw_reg &op0 = is_int_div ? src1 : src0;
   struct brw_reg &op1 = is_int_div ? src0 : src1;
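   /* Per the PRM quote above, the hardware divides Operand1 by Operand0 for
    * the INT DIV functions, so swapping the operands here makes the message
    * compute src0 / src1 as the IR expects.
    */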

   brw_push_insn_state(p);
   brw_set_default_saturate(p, false);
   brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), op1.type), op1);
   brw_pop_insn_state(p);

   gen4_math(p,
             dst,
             brw_math_function(inst->opcode),
             inst->base_mrf,
             op0,
             BRW_MATH_PRECISION_FULL);
}

static void
generate_tex(struct brw_codegen *p,
             struct brw_vue_prog_data *prog_data,
             vec4_instruction *inst,
             struct brw_reg dst,
             struct brw_reg src,
             struct brw_reg sampler_index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   int msg_type = -1;

   if (devinfo->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(devinfo->gen >= 8 || devinfo->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_CMS_W:
         assert(devinfo->gen >= 9);
         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (devinfo->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      case SHADER_OPCODE_SAMPLEINFO:
         msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD_COMPARE;
            assert(inst->mlen == 3);
         } else {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD;
            assert(inst->mlen == 2);
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually. */
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS;
         assert(inst->mlen == 4);
         break;
      case SHADER_OPCODE_TXF:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_LD;
         assert(inst->mlen == 2);
         break;
      case SHADER_OPCODE_TXS:
         msg_type = BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO;
         assert(inst->mlen == 2);
         break;
      default:
         unreachable("should not get here: invalid vec4 texture opcode");
      }
   }

   assert(msg_type != -1);

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);

   /* Load the message header if present.  If there's a texture offset, we need
    * to set it up explicitly and load the offset bitfield.  Otherwise, we can
    * use an implied move from g0 to the first message register.
    */
   if (inst->header_size != 0) {
      if (devinfo->gen < 6 && !inst->offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = brw_vec8_grf(0, 0);
      } else {
         struct brw_reg header =
            retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);
         uint32_t dw2 = 0;

         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_push_insn_state(p);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_MOV(p, header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

         brw_set_default_access_mode(p, BRW_ALIGN_1);

         if (inst->offset)
            /* Set the texel offset bits in DWord 2. */
            dw2 = inst->offset;

         if (devinfo->gen >= 9)
            /* SKL+ overloads BRW_SAMPLER_SIMD_MODE_SIMD4X2 to also do SIMD8D,
             * based on bit 22 in the header.
             */
            dw2 |= GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2;

         if (dw2)
            brw_MOV(p, get_element_ud(header, 2), brw_imm_ud(dw2));

         brw_adjust_sampler_state_pointer(p, header, sampler_index);
         brw_pop_insn_state(p);
      }
   }

   uint32_t return_format;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
         inst->opcode == SHADER_OPCODE_TG4_OFFSET)
         ? prog_data->base.binding_table.gather_texture_start
         : prog_data->base.binding_table.texture_start;

   if (sampler_index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t sampler = sampler_index.ud;

      brw_SAMPLE(p,
                 dst,
                 inst->base_mrf,
                 src,
                 sampler + base_binding_table_index,
                 sampler % 16,
                 msg_type,
                 1, /* response length */
                 inst->mlen,
                 inst->header_size != 0,
                 BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                 return_format);

      brw_mark_surface_used(&prog_data->base, sampler + base_binding_table_index);
   } else {
      /* Non-constant sampler index. */

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
      struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* addr = ((sampler * 0x101) + base_binding_table_index) & 0xfff */
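      /* Multiplying the sampler index by 0x101 replicates it into bits 7:0
       * and 15:8; after the 0xfff mask, the copy at bits 11:8 provides the
       * descriptor's sampler index (the indirect analogue of "sampler % 16"
       * above) while bits 7:0 plus the base binding table index provide the
       * surface index.
       */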
      brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
      if (base_binding_table_index)
         brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
      brw_AND(p, addr, addr, brw_imm_ud(0xfff));

      brw_pop_insn_state(p);

      if (inst->base_mrf != -1)
         gen6_resolve_implied_move(p, &src, inst->base_mrf);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, src, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              msg_type,
                              1 /* rlen */,
                              inst->mlen /* mlen */,
                              inst->header_size != 0 /* header */,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              return_format);

      /* visitor knows more than we do about the surface limit required,
       * so has already done marking.
       */
   }
}

static void
generate_vs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_urb_write(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_urb_write_allocate(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);

   /* We pass the temporary passed in src0 as the writeback register */
   brw_urb_WRITE(p,
                 inst->src[0].as_brw_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_ALLOCATE_COMPLETE,
                 inst->mlen,
                 1, /* response len */
                 inst->offset, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   /* Now put allocated urb handle in dst.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(inst->dst.as_brw_reg(), 0),
           get_element_ud(inst->src[0].as_brw_reg(), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg src = brw_message_reg(inst->base_mrf);
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 src,
                 BRW_URB_WRITE_EOT | inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 0, /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_gs_set_write_offset(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src0,
                             struct brw_reg src1)
{
   /* From p22 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.3):
    *
    *     Slot 0 Offset.  This field, after adding to the Global Offset field
    *     in the message descriptor, specifies the offset (in 256-bit units)
    *     from the start of the URB entry, as referenced by URB Handle 0, at
    *     which the data will be accessed.
    *
    * Similar text describes DWORD M0.4, which is slot 1 offset.
    *
    * Therefore, we want to multiply DWORDs 0 and 4 of src0 (the x components
    * of the register for geometry shader invocations 0 and 1) by the
    * immediate value in src1, and store the result in DWORDs 3 and 4 of dst.
    *
    * We can do this with the following EU instruction:
    *
    *     mul(2) dst.3<1>UD src0<8;2,4>UD src1<...>UW   { Align1 WE_all }
    */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   assert(p->devinfo->gen >= 7 &&
          src1.file == BRW_IMMEDIATE_VALUE &&
          src1.type == BRW_REGISTER_TYPE_UD &&
          src1.ud <= USHRT_MAX);
   if (src0.file == BRW_IMMEDIATE_VALUE) {
      brw_MOV(p, suboffset(stride(dst, 2, 2, 1), 3),
              brw_imm_ud(src0.ud * src1.ud));
   } else {
      brw_MUL(p, suboffset(stride(dst, 2, 2, 1), 3), stride(src0, 8, 2, 4),
              retype(src1, BRW_REGISTER_TYPE_UW));
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_set_vertex_count(struct brw_codegen *p,
                             struct brw_reg dst,
                             struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   if (p->devinfo->gen >= 8) {
      /* Move the vertex count into the second MRF for the EOT write. */
      brw_MOV(p, retype(brw_message_reg(dst.nr + 1), BRW_REGISTER_TYPE_UD),
              src);
   } else {
      /* If we think of the src and dst registers as composed of 8 DWORDs each,
       * we want to pick up the contents of DWORDs 0 and 4 from src, truncate
       * them to WORDs, and then pack them into DWORD 2 of dst.
       *
       * It's easier to get the EU to do this if we think of the src and dst
       * registers as composed of 16 WORDs each; then, we want to pick up the
       * contents of WORDs 0 and 8 from src, and pack them into WORDs 4 and 5
       * of dst.
       *
       * We can do that by the following EU instruction:
       *
       *     mov (2) dst.4<1>:uw src<8;1,0>:uw   { Align1, Q1, NoMask }
       */
      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_MOV(p,
              suboffset(stride(retype(dst, BRW_REGISTER_TYPE_UW), 2, 2, 1), 4),
              stride(retype(src, BRW_REGISTER_TYPE_UW), 8, 1, 0));
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_svb_write(struct brw_codegen *p,
                      struct brw_vue_prog_data *prog_data,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg src0,
                      struct brw_reg src1)
{
   int binding = inst->sol_binding;
   bool final_write = inst->sol_final_write;

   brw_push_insn_state(p);
   /* Copy Vertex data into M0.x */
   brw_MOV(p, stride(dst, 4, 4, 1),
           stride(retype(src0, BRW_REGISTER_TYPE_UD), 4, 4, 1));

   /* Send SVB Write */
   brw_svb_write(p,
                 final_write ? src1 : brw_null_reg(), /* dest == src1 */
                 1, /* msg_reg_nr */
                 dst, /* src0 == previous dst */
                 SURF_INDEX_GEN6_SOL_BINDING(binding), /* binding_table_index */
                 final_write); /* send_commit_msg */

   /* Finally, wait for the write commit to occur so that we can proceed to
    * other things safely.
    *
    * From the Sandybridge PRM, Volume 4, Part 1, Section 3.3:
    *
    *     The write commit does not modify the destination register, but
    *     merely clears the dependency associated with the destination
    *     register.  Thus, a simple “mov” instruction using the register as a
    *     source is sufficient to wait for the write commit to occur.
    */
   if (final_write) {
      brw_MOV(p, src1, src1);
   }
   brw_pop_insn_state(p);
}

static void
generate_gs_svb_set_destination_index(struct brw_codegen *p,
                                      vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg src)
{
   int vertex = inst->sol_vertex;
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(dst, 5), get_element_ud(src, vertex));
   brw_pop_insn_state(p);
}

static void
generate_gs_set_dword_2(struct brw_codegen *p,
                        struct brw_reg dst,
                        struct brw_reg src)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, suboffset(vec1(dst), 2), suboffset(vec1(src), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_prepare_channel_masks(struct brw_codegen *p,
                                  struct brw_reg dst)
{
   /* We want to left shift just DWORD 4 (the x component belonging to the
    * second geometry shader invocation) by 4 bits.  So generate the
    * instruction:
    *
    *     shl(1) dst.4<1>UD dst.4<0,1,0>UD 4UD { align1 WE_all }
    */
   dst = suboffset(vec1(dst), 4);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_SHL(p, dst, dst, brw_imm_ud(4));
   brw_pop_insn_state(p);
}

static void
generate_gs_set_channel_masks(struct brw_codegen *p,
                              struct brw_reg dst,
                              struct brw_reg src)
{
   /* From p21 of volume 4 part 2 of the Ivy Bridge PRM (2.4.3.1 Message
    * Header: M0.5):
    *
    *     15 Vertex 1 DATA [3] / Vertex 0 DATA[7] Channel Mask
    *
    *        When Swizzle Control = URB_INTERLEAVED this bit controls Vertex 1
    *        DATA[3], when Swizzle Control = URB_NOSWIZZLE this bit controls
    *        Vertex 0 DATA[7].  This bit is ANDed with the corresponding
    *        channel enable to determine the final channel enable.  For the
    *        URB_READ_OWORD & URB_READ_HWORD messages, when final channel
    *        enable is 1 it indicates that Vertex 1 DATA [3] will be included
    *        in the writeback message.  For the URB_WRITE_OWORD &
    *        URB_WRITE_HWORD messages, when final channel enable is 1 it
    *        indicates that Vertex 1 DATA [3] will be written to the surface.
    *
    *        0: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel not included
    *        1: Vertex 1 DATA [3] / Vertex 0 DATA[7] channel included
    *
    *     14 Vertex 1 DATA [2] Channel Mask
    *     13 Vertex 1 DATA [1] Channel Mask
    *     12 Vertex 1 DATA [0] Channel Mask
    *     11 Vertex 0 DATA [3] Channel Mask
    *     10 Vertex 0 DATA [2] Channel Mask
    *      9 Vertex 0 DATA [1] Channel Mask
    *      8 Vertex 0 DATA [0] Channel Mask
    *
    * (This is from a section of the PRM that is agnostic to the particular
    * type of shader being executed, so "Vertex 0" and "Vertex 1" refer to
    * geometry shader invocations 0 and 1, respectively).  Since we have the
    * enable flags for geometry shader invocation 0 in bits 3:0 of DWORD 0,
    * and the enable flags for geometry shader invocation 1 in bits 7:4 of
    * DWORD 4, we just need to OR them together and store the result in bits
    * 15:8 of DWORD 5.
    *
    * It's easier to get the EU to do this if we think of the src and dst
    * registers as composed of 32 bytes each; then, we want to pick up the
    * contents of bytes 0 and 16 from src, OR them together, and store them in
    * byte 21.
    *
    * We can do that by the following EU instruction:
    *
    *     or(1) dst.21<1>UB src<0,1,0>UB src.16<0,1,0>UB    { align1 WE_all }
    *
    * Note: this relies on the source register having zeros in (a) bits 7:4 of
    * DWORD 0 and (b) bits 3:0 of DWORD 4.  We can rely on (b) because the
    * source register was prepared by GS_OPCODE_PREPARE_CHANNEL_MASKS (which
    * shifts DWORD 4 left by 4 bits), and we can rely on (a) because prior to
    * the execution of GS_OPCODE_PREPARE_CHANNEL_MASKS, DWORDs 0 and 4 need to
    * contain valid channel mask values (which are in the range 0x0-0xf).
    */
   dst = retype(dst, BRW_REGISTER_TYPE_UB);
   src = retype(src, BRW_REGISTER_TYPE_UB);
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_OR(p, suboffset(vec1(dst), 21), vec1(src), suboffset(vec1(src), 16));
   brw_pop_insn_state(p);
}

static void
generate_gs_get_instance_id(struct brw_codegen *p,
                            struct brw_reg dst)
{
   /* We want to right shift R0.0 & R0.1 by GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT
    * and store into dst.0 & dst.4.  So generate the instruction:
    *
    *     shr(8) dst<1> R0<1,4,0> GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT { align1 WE_normal 1Q }
    */
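   /* The <1,4,0> region reads R0.0 for channels 0-3 and R0.1 for channels
    * 4-7, so each SIMD4x2 slot receives the instance ID of its own
    * invocation.
    */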
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   dst = retype(dst, BRW_REGISTER_TYPE_UD);
   struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_SHR(p, dst, stride(r0, 1, 4, 0),
           brw_imm_ud(GEN7_GS_PAYLOAD_INSTANCE_ID_SHIFT));
   brw_pop_insn_state(p);
}

static void
generate_gs_ff_sync_set_primitives(struct brw_codegen *p,
                                   struct brw_reg dst,
                                   struct brw_reg src0,
                                   struct brw_reg src1,
                                   struct brw_reg src2)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   /* Save src0 data in 16:31 bits of dst.0 */
   brw_AND(p, suboffset(vec1(dst), 0), suboffset(vec1(src0), 0),
           brw_imm_ud(0xffffu));
   brw_SHL(p, suboffset(vec1(dst), 0), suboffset(vec1(dst), 0), brw_imm_ud(16));
   /* Save src1 data in 0:15 bits of dst.0 */
   brw_AND(p, suboffset(vec1(src2), 0), suboffset(vec1(src1), 0),
           brw_imm_ud(0xffffu));
   brw_OR(p, suboffset(vec1(dst), 0),
          suboffset(vec1(dst), 0),
          suboffset(vec1(src2), 0));
   brw_pop_insn_state(p);
}

static void
generate_gs_ff_sync(struct brw_codegen *p,
                    vec4_instruction *inst,
                    struct brw_reg dst,
                    struct brw_reg src0,
                    struct brw_reg src1)
{
   /* This opcode uses an implied MRF register for:
    *  - the header of the ff_sync message.  And as such it is expected to be
    *    initialized to r0 before calling here.
    *  - the destination where we will write the allocated URB handle.
    */
   struct brw_reg header =
      retype(brw_message_reg(inst->base_mrf), BRW_REGISTER_TYPE_UD);

   /* Overwrite dword 0 of the header (SO vertices to write) and
    * dword 1 (number of primitives written).
    */
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(src1, 0));
   brw_MOV(p, get_element_ud(header, 1), get_element_ud(src0, 0));
   brw_pop_insn_state(p);

   /* Allocate URB handle in dst */
   brw_ff_sync(p,
               dst,
               0,
               header,
               1, /* allocate */
               1, /* response length */
               0 /* eot */);

   /* Now put allocated urb handle in header.0 */
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, get_element_ud(header, 0), get_element_ud(dst, 0));

   /* src1 is not an immediate when we use transform feedback */
   if (src1.file != BRW_IMMEDIATE_VALUE)
      brw_MOV(p, brw_vec4_grf(src1.nr, 0), brw_vec4_grf(dst.nr, 1));

   brw_pop_insn_state(p);
}

static void
generate_gs_set_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   /* In gen6, PrimitiveID is delivered in R0.1 of the payload */
   struct brw_reg src = brw_vec8_grf(0, 0);
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 0), get_element_ud(src, 1));
   brw_pop_insn_state(p);
}

static void
generate_tcs_get_instance_id(struct brw_codegen *p, struct brw_reg dst)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;

   /* "Instance Count" comes as part of the payload in r0.2 bits 23:17.
    *
    * Since we operate in SIMD4x2 mode, we need to run half as many threads
    * as necessary.  So we assign (2i + 1, 2i) as the thread counts.  We
    * shift right by one less to accomplish the multiplication by two.
    */
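   /* A worked example (not from the PRM): a thread delivered instance
    * number i computes 2i into dst.0 and 2i + 1 into dst.4, so its two
    * SIMD4x2 slots handle invocations 2i and 2i + 1.
    */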
   dst = retype(dst, BRW_REGISTER_TYPE_UD);
   struct brw_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   const int mask = ivb ? INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
   const int shift = ivb ? 16 : 17;

   brw_AND(p, get_element_ud(dst, 0), get_element_ud(r0, 2), brw_imm_ud(mask));
   brw_SHR(p, get_element_ud(dst, 0), get_element_ud(dst, 0),
           brw_imm_ud(shift - 1));
   brw_ADD(p, get_element_ud(dst, 4), get_element_ud(dst, 0), brw_imm_ud(1));

   brw_pop_insn_state(p);
}

static void
generate_tcs_urb_write(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg urb_header)
{
   const struct brw_device_info *devinfo = p->devinfo;

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, brw_null_reg());
   brw_set_src0(p, send, urb_header);

   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              inst->mlen /* mlen */, 0 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_WRITE_OWORD);
   brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
   if (inst->urb_write_flags & BRW_URB_WRITE_EOT) {
      brw_inst_set_eot(devinfo, send, 1);
   } else {
      brw_inst_set_urb_per_slot_offset(devinfo, send, 1);
      brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
   }

   /* what happens to swizzles? */
}


static void
generate_tcs_input_urb_offsets(struct brw_codegen *p,
                               struct brw_reg dst,
                               struct brw_reg vertex,
                               struct brw_reg offset)
{
   /* Generates an URB read/write message header for HS/DS operation.
    * Inputs are a vertex index, and a byte offset from the beginning of
    * the vertex. */

   /* If `vertex` is not an immediate, we clobber a0.0 */

   assert(vertex.file == BRW_IMMEDIATE_VALUE || vertex.file == BRW_GENERAL_REGISTER_FILE);
   assert(vertex.type == BRW_REGISTER_TYPE_UD || vertex.type == BRW_REGISTER_TYPE_D);

   assert(dst.file == BRW_GENERAL_REGISTER_FILE);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, dst, brw_imm_ud(0));

   /* m0.5 bits 8-15 are channel enables */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));

   /* m0.0-0.1: URB handles */
   if (vertex.file == BRW_IMMEDIATE_VALUE) {
      uint32_t vertex_index = vertex.ud;
      struct brw_reg index_reg = brw_vec1_grf(
            1 + (vertex_index >> 3), vertex_index & 7);
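      /* Eight ICP handles arrive per GRF starting at g1.0, so handle n is
       * channel (n & 7) of g(1 + n / 8); the indirect path below walks the
       * same layout with an address register.
       */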

      brw_MOV(p, vec2(get_element_ud(dst, 0)),
              retype(index_reg, BRW_REGISTER_TYPE_UD));
   } else {
      /* Use indirect addressing.  ICP Handles are DWords (single channels
       * of a register) and start at g1.0.
       *
       * In order to start our region at g1.0, we add 8 to the vertex index,
       * effectively skipping over the 8 channels in g0.0.  This gives us a
       * DWord offset to the ICP Handle.
       *
       * Indirect addressing works in terms of bytes, so we then multiply
       * the DWord offset by 4 (by shifting left by 2).
       */
      struct brw_reg addr = brw_address_reg(0);

      /* bottom half: m0.0 = g[1.0 + vertex.0]UD */
      brw_ADD(p, addr, get_element_ud(vertex, 0), brw_imm_uw(0x8));
      brw_SHL(p, addr, addr, brw_imm_ud(2));
      brw_MOV(p, get_element_ud(dst, 0), deref_1ud(brw_indirect(0, 0), 0));

      /* top half: m0.1 = g[1.0 + vertex.4]UD */
      brw_ADD(p, addr, get_element_ud(vertex, 4), brw_imm_uw(0x8));
      brw_SHL(p, addr, addr, brw_imm_ud(2));
      brw_MOV(p, get_element_ud(dst, 1), deref_1ud(brw_indirect(0, 0), 0));
   }

   /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
   if (offset.file != ARF)
      brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}


static void
generate_tcs_output_urb_offsets(struct brw_codegen *p,
                                struct brw_reg dst,
                                struct brw_reg write_mask,
                                struct brw_reg offset)
{
   /* Generates an URB read/write message header for HS/DS operation, for the patch URB entry. */
   assert(dst.file == BRW_GENERAL_REGISTER_FILE || dst.file == BRW_MESSAGE_REGISTER_FILE);

   assert(write_mask.file == BRW_IMMEDIATE_VALUE);
   assert(write_mask.type == BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, dst, brw_imm_ud(0));

   unsigned mask = write_mask.ud;

   /* m0.5 bits 15:12 and 11:8 are channel enables */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud((mask << 8) | (mask << 12)));

   /* HS patch URB handle is delivered in r0.0 */
   struct brw_reg urb_handle = brw_vec1_grf(0, 0);

   /* m0.0-0.1: URB handles */
   brw_MOV(p, vec2(get_element_ud(dst, 0)),
           retype(urb_handle, BRW_REGISTER_TYPE_UD));

   /* m0.3-0.4: 128bit-granular offsets into the URB from the handles */
   if (offset.file != ARF)
      brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}

static void
generate_tes_create_input_read_header(struct brw_codegen *p,
                                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   /* Initialize the register to 0 */
   brw_MOV(p, dst, brw_imm_ud(0));

   /* Enable all the channels in m0.5 bits 15:8 */
   brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));

   /* Copy g1.3 (the patch URB handle) to m0.0 and m0.1.  For safety,
    * mask out irrelevant "Reserved" bits, as they're not marked MBZ.
    */
   brw_AND(p, vec2(get_element_ud(dst, 0)),
           retype(brw_vec1_grf(1, 3), BRW_REGISTER_TYPE_UD),
           brw_imm_ud(0x1fff));
   brw_pop_insn_state(p);
}

static void
generate_tes_add_indirect_urb_offset(struct brw_codegen *p,
                                     struct brw_reg dst,
                                     struct brw_reg header,
                                     struct brw_reg offset)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_MOV(p, dst, header);
   /* m0.3-0.4: 128-bit-granular offsets into the URB from the handles */
   brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));

   brw_pop_insn_state(p);
}

static void
generate_vec4_urb_read(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg dst,
                       struct brw_reg header)
{
   const struct brw_device_info *devinfo = p->devinfo;

   assert(header.file == BRW_GENERAL_REGISTER_FILE);
   assert(header.type == BRW_REGISTER_TYPE_UD);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);

   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              1 /* mlen */, 1 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
   brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
   brw_inst_set_urb_per_slot_offset(devinfo, send, 1);

   brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
}

static void
generate_tcs_release_input(struct brw_codegen *p,
                           struct brw_reg header,
                           struct brw_reg vertex,
                           struct brw_reg is_unpaired)
{
   const struct brw_device_info *devinfo = p->devinfo;

   assert(vertex.file == BRW_IMMEDIATE_VALUE);
   assert(vertex.type == BRW_REGISTER_TYPE_UD);

   /* m0.0-0.1: URB handles */
   struct brw_reg urb_handles =
      retype(brw_vec2_grf(1 + (vertex.ud >> 3), vertex.ud & 7),
             BRW_REGISTER_TYPE_UD);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, header, brw_imm_ud(0));
   brw_MOV(p, vec2(get_element_ud(header, 0)), urb_handles);
   brw_pop_insn_state(p);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, brw_null_reg());
   brw_set_src0(p, send, header);
   brw_set_message_descriptor(p, send, BRW_SFID_URB,
                              1 /* mlen */, 0 /* rlen */,
                              true /* header */, false /* eot */);
   brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
   brw_inst_set_urb_complete(devinfo, send, 1);
   brw_inst_set_urb_swizzle_control(devinfo, send, is_unpaired.ud ?
                                    BRW_URB_SWIZZLE_NONE :
                                    BRW_URB_SWIZZLE_INTERLEAVE);
}

static void
generate_tcs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
{
   struct brw_reg header = brw_message_reg(inst->base_mrf);

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, header, brw_imm_ud(0));
   brw_MOV(p, get_element_ud(header, 0),
           retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD));
   brw_pop_insn_state(p);

   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 header,
                 BRW_URB_WRITE_EOT | inst->urb_write_flags,
                 inst->mlen,
                 0, /* response len */
                 0, /* urb destination offset */
                 0);
}

static void
generate_tes_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, dst, retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_D));
   brw_pop_insn_state(p);
}

static void
generate_tcs_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, dst, retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
   brw_pop_insn_state(p);
}

static void
generate_tcs_create_barrier_header(struct brw_codegen *p,
                                   struct brw_vue_prog_data *prog_data,
                                   struct brw_reg dst)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;
   struct brw_reg m0_2 = get_element_ud(dst, 2);
   unsigned instances = ((struct brw_tcs_prog_data *) prog_data)->instances;

   brw_push_insn_state(p);
   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   /* Zero the message header */
   brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));

   /* Copy "Barrier ID" from r0.2, bits 16:13 (Gen7.5+) or 15:12 (Gen7) */
   brw_AND(p, m0_2,
           retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
           brw_imm_ud(ivb ? INTEL_MASK(15, 12) : INTEL_MASK(16, 13)));

   /* Shift it up to bits 27:24. */
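   /* (Shifting the Gen7.5+ field at 16:13 left by 11, or the Gen7 field at
    * 15:12 left by 12, lands it at 27:24 either way.)
    */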
   brw_SHL(p, m0_2, get_element_ud(dst, 2), brw_imm_ud(ivb ? 12 : 11));

   /* Set the Barrier Count and the enable bit */
   brw_OR(p, m0_2, m0_2, brw_imm_ud(instances << 9 | (1 << 15)));

   brw_pop_insn_state(p);
}

static void
generate_oword_dual_block_offsets(struct brw_codegen *p,
                                  struct brw_reg m1,
                                  struct brw_reg index)
{
   int second_vertex_offset;

   if (p->devinfo->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;
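
   /* Gen6+ OWord dual block messages measure these offsets in OWords
    * (16-byte units), while older parts appear to use byte offsets (compare
    * the shift by 4 in generate_pull_constant_load below), hence 1 vs. 16.
    */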

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}

static void
generate_unpack_flags(struct brw_codegen *p,
                      struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_set_default_access_mode(p, BRW_ALIGN_1);

   struct brw_reg flags = brw_flag_reg(0, 0);
   struct brw_reg dst_0 = suboffset(vec1(dst), 0);
   struct brw_reg dst_4 = suboffset(vec1(dst), 4);

   brw_AND(p, dst_0, flags, brw_imm_ud(0x0f));
   brw_AND(p, dst_4, flags, brw_imm_ud(0xf0));
   brw_SHR(p, dst_4, dst_4, brw_imm_ud(4));

   brw_pop_insn_state(p);
}

static void
generate_scratch_read(struct brw_codegen *p,
                      vec4_instruction *inst,
                      struct brw_reg dst,
                      struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           brw_scratch_surface_idx(p),
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}

static void
generate_scratch_write(struct brw_codegen *p,
                       vec4_instruction *inst,
                       struct brw_reg dst,
                       struct brw_reg src,
                       struct brw_reg index)
{
   const struct brw_device_info *devinfo = p->devinfo;
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_default_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(p, brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (devinfo->gen >= 7)
      msg_type = GEN7_DATAPORT_DC_OWORD_DUAL_BLOCK_WRITE;
   else if (devinfo->gen == 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_default_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (devinfo->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_write_message(p, send,
                            brw_scratch_surface_idx(p),
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* not a render target write */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}

static void
generate_pull_constant_load(struct brw_codegen *p,
                            struct brw_vue_prog_data *prog_data,
                            vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg index,
                            struct brw_reg offset)
{
   const struct brw_device_info *devinfo = p->devinfo;
   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.ud;

   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

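   /* The Gen6+ OWord dual block message takes its offset in OWords (16-byte
    * units), hence the shift by 4 below; presumably the pre-Gen6 message is
    * byte-addressed, since the offset is passed through unchanged there.
    */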
   if (devinfo->gen >= 6) {
      if (offset.file == BRW_IMMEDIATE_VALUE) {
         brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
                           BRW_REGISTER_TYPE_D),
                 brw_imm_d(offset.ud >> 4));
      } else {
         brw_SHR(p, retype(brw_message_reg(inst->base_mrf + 1),
                           BRW_REGISTER_TYPE_D),
                 offset, brw_imm_d(4));
      }
   } else {
      brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1),
                        BRW_REGISTER_TYPE_D),
              offset);
   }

   uint32_t msg_type;

   if (devinfo->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (devinfo->gen == 5 || devinfo->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_cond_modifier(p->devinfo, send, inst->base_mrf);
   brw_set_dp_read_message(p, send,
                           surf_index,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           true, /* header_present */
                           1 /* rlen */);
}

static void
generate_get_buffer_size(struct brw_codegen *p,
                         struct brw_vue_prog_data *prog_data,
                         vec4_instruction *inst,
                         struct brw_reg dst,
                         struct brw_reg src,
                         struct brw_reg surf_index)
{
   assert(p->devinfo->gen >= 7);
   assert(surf_index.type == BRW_REGISTER_TYPE_UD &&
          surf_index.file == BRW_IMMEDIATE_VALUE);

   brw_SAMPLE(p,
              dst,
              inst->base_mrf,
              src,
              surf_index.ud,
              0,
              GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
              1, /* response length */
              inst->mlen,
              inst->header_size > 0,
              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
              BRW_SAMPLER_RETURN_FORMAT_SINT32);

   brw_mark_surface_used(&prog_data->base, surf_index.ud);
}

static void
generate_pull_constant_load_gen7(struct brw_codegen *p,
                                 struct brw_vue_prog_data *prog_data,
                                 vec4_instruction *inst,
                                 struct brw_reg dst,
                                 struct brw_reg surf_index,
                                 struct brw_reg offset)
{
   assert(surf_index.type == BRW_REGISTER_TYPE_UD);

   if (surf_index.file == BRW_IMMEDIATE_VALUE) {

      brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, insn, dst);
      brw_set_src0(p, insn, offset);
      brw_set_sampler_message(p, insn,
                              surf_index.ud,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);

      brw_mark_surface_used(&prog_data->base, surf_index.ud);

   } else {

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(surf_index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, offset, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1 /* rlen */,
                              inst->mlen,
                              inst->header_size != 0,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);
   }
}

static void
generate_set_simd4x2_header_gen9(struct brw_codegen *p,
                                 vec4_instruction *inst,
                                 struct brw_reg dst)
{
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);

   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_MOV(p, vec8(dst), retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));

   brw_set_default_access_mode(p, BRW_ALIGN_1);
   brw_MOV(p, get_element_ud(dst, 2),
           brw_imm_ud(GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2));

   brw_pop_insn_state(p);
}

static void
generate_code(struct brw_codegen *p,
              const struct brw_compiler *compiler,
              void *log_data,
              const nir_shader *nir,
              struct brw_vue_prog_data *prog_data,
              const struct cfg_t *cfg)
{
   const struct brw_device_info *devinfo = p->devinfo;
   const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->stage);
   bool debug_flag = INTEL_DEBUG &
      intel_debug_flag_for_shader_stage(nir->stage);
   struct annotation_info annotation;
   memset(&annotation, 0, sizeof(annotation));
   int loop_count = 0;

   foreach_block_and_inst (block, vec4_instruction, inst, cfg) {
      struct brw_reg src[3], dst;

      if (unlikely(debug_flag))
         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->src[i].as_brw_reg();
      }
      dst = inst->dst.as_brw_reg();

      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      brw_set_default_acc_write_control(p, inst->writes_accumulator);

      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);

      unsigned pre_emit_nr_insn = p->nr_insn;

      if (dst.width == BRW_WIDTH_4) {
         /* This happens in attribute fixups for "dual instanced" geometry
          * shaders, since they use attributes that are vec4's.  Since the exec
          * width is only 4, it's essential that the caller set
          * force_writemask_all in order to make sure the instruction is executed
          * regardless of which channels are enabled.
          */
         assert(inst->force_writemask_all);

         /* Fix up any <8;8,1> or <0;4,1> source registers to <4;4,1> to satisfy
          * the following register region restrictions (from Graphics BSpec:
          * 3D-Media-GPGPU Engine > EU Overview > Registers and Register Regions
          * > Register Region Restrictions)
          *
          *     "1. ExecSize must be greater than or equal to Width.
          *
          *      2. If ExecSize = Width and HorzStride != 0, VertStride must be
          *         set to Width * HorzStride."
1443 */
1444 for (int i = 0; i < 3; i++) {
1445 if (src[i].file == BRW_GENERAL_REGISTER_FILE)
1446 src[i] = stride(src[i], 4, 4, 1);
1447 }
1448 }
1449
1450 switch (inst->opcode) {
1451 case VEC4_OPCODE_UNPACK_UNIFORM:
1452 case BRW_OPCODE_MOV:
1453 brw_MOV(p, dst, src[0]);
1454 break;
1455 case BRW_OPCODE_ADD:
1456 brw_ADD(p, dst, src[0], src[1]);
1457 break;
1458 case BRW_OPCODE_MUL:
1459 brw_MUL(p, dst, src[0], src[1]);
1460 break;
1461 case BRW_OPCODE_MACH:
1462 brw_MACH(p, dst, src[0], src[1]);
1463 break;
1464
1465 case BRW_OPCODE_MAD:
1466 assert(devinfo->gen >= 6);
1467 brw_MAD(p, dst, src[0], src[1], src[2]);
1468 break;
1469
1470 case BRW_OPCODE_FRC:
1471 brw_FRC(p, dst, src[0]);
1472 break;
1473 case BRW_OPCODE_RNDD:
1474 brw_RNDD(p, dst, src[0]);
1475 break;
1476 case BRW_OPCODE_RNDE:
1477 brw_RNDE(p, dst, src[0]);
1478 break;
1479 case BRW_OPCODE_RNDZ:
1480 brw_RNDZ(p, dst, src[0]);
1481 break;
1482
1483 case BRW_OPCODE_AND:
1484 brw_AND(p, dst, src[0], src[1]);
1485 break;
1486 case BRW_OPCODE_OR:
1487 brw_OR(p, dst, src[0], src[1]);
1488 break;
1489 case BRW_OPCODE_XOR:
1490 brw_XOR(p, dst, src[0], src[1]);
1491 break;
1492 case BRW_OPCODE_NOT:
1493 brw_NOT(p, dst, src[0]);
1494 break;
1495 case BRW_OPCODE_ASR:
1496 brw_ASR(p, dst, src[0], src[1]);
1497 break;
1498 case BRW_OPCODE_SHR:
1499 brw_SHR(p, dst, src[0], src[1]);
1500 break;
1501 case BRW_OPCODE_SHL:
1502 brw_SHL(p, dst, src[0], src[1]);
1503 break;
1504
1505 case BRW_OPCODE_CMP:
1506 brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
1507 break;
1508 case BRW_OPCODE_SEL:
1509 brw_SEL(p, dst, src[0], src[1]);
1510 break;
1511
1512 case BRW_OPCODE_DPH:
1513 brw_DPH(p, dst, src[0], src[1]);
1514 break;
1515
1516 case BRW_OPCODE_DP4:
1517 brw_DP4(p, dst, src[0], src[1]);
1518 break;
1519
1520 case BRW_OPCODE_DP3:
1521 brw_DP3(p, dst, src[0], src[1]);
1522 break;
1523
1524 case BRW_OPCODE_DP2:
1525 brw_DP2(p, dst, src[0], src[1]);
1526 break;
1527
1528 case BRW_OPCODE_F32TO16:
1529 assert(devinfo->gen >= 7);
1530 brw_F32TO16(p, dst, src[0]);
1531 break;
1532
1533 case BRW_OPCODE_F16TO32:
1534 assert(devinfo->gen >= 7);
1535 brw_F16TO32(p, dst, src[0]);
1536 break;
1537
1538 case BRW_OPCODE_LRP:
1539 assert(devinfo->gen >= 6);
1540 brw_LRP(p, dst, src[0], src[1], src[2]);
1541 break;
1542
1543 case BRW_OPCODE_BFREV:
1544 assert(devinfo->gen >= 7);
1545 /* BFREV only supports UD type for src and dst. */
1546 brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
1547 retype(src[0], BRW_REGISTER_TYPE_UD));
1548 break;
1549 case BRW_OPCODE_FBH:
1550 assert(devinfo->gen >= 7);
1551 /* FBH only supports UD type for dst. */
1552 brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
1553 break;
1554 case BRW_OPCODE_FBL:
1555 assert(devinfo->gen >= 7);
1556 /* FBL only supports UD type for dst. */
1557 brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
1558 break;
1559 case BRW_OPCODE_CBIT:
1560 assert(devinfo->gen >= 7);
1561 /* CBIT only supports UD type for dst. */
1562 brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
1563 break;
1564 case BRW_OPCODE_ADDC:
1565 assert(devinfo->gen >= 7);
1566 brw_ADDC(p, dst, src[0], src[1]);
1567 break;
1568 case BRW_OPCODE_SUBB:
1569 assert(devinfo->gen >= 7);
1570 brw_SUBB(p, dst, src[0], src[1]);
1571 break;
1572 case BRW_OPCODE_MAC:
1573 brw_MAC(p, dst, src[0], src[1]);
1574 break;
1575
1576 case BRW_OPCODE_BFE:
1577 assert(devinfo->gen >= 7);
1578 brw_BFE(p, dst, src[0], src[1], src[2]);
1579 break;
1580
1581 case BRW_OPCODE_BFI1:
1582 assert(devinfo->gen >= 7);
1583 brw_BFI1(p, dst, src[0], src[1]);
1584 break;
1585 case BRW_OPCODE_BFI2:
1586 assert(devinfo->gen >= 7);
1587 brw_BFI2(p, dst, src[0], src[1], src[2]);
1588 break;
1589
1590 case BRW_OPCODE_IF:
1591 if (!inst->src[0].is_null()) {
1592 /* The instruction has an embedded compare (only allowed on gen6) */
1593 assert(devinfo->gen == 6);
1594 gen6_IF(p, inst->conditional_mod, src[0], src[1]);
1595 } else {
1596 brw_inst *if_inst = brw_IF(p, BRW_EXECUTE_8);
1597 brw_inst_set_pred_control(p->devinfo, if_inst, inst->predicate);
1598 }
1599 break;
1600
1601 case BRW_OPCODE_ELSE:
1602 brw_ELSE(p);
1603 break;
1604 case BRW_OPCODE_ENDIF:
1605 brw_ENDIF(p);
1606 break;
1607
1608 case BRW_OPCODE_DO:
1609 brw_DO(p, BRW_EXECUTE_8);
1610 break;
1611
1612 case BRW_OPCODE_BREAK:
1613 brw_BREAK(p);
1614 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
1615 break;
1616 case BRW_OPCODE_CONTINUE:
1617 brw_CONT(p);
1618 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
1619 break;
1620
1621 case BRW_OPCODE_WHILE:
1622 brw_WHILE(p);
1623 loop_count++;
1624 break;
1625
1626 case SHADER_OPCODE_RCP:
1627 case SHADER_OPCODE_RSQ:
1628 case SHADER_OPCODE_SQRT:
1629 case SHADER_OPCODE_EXP2:
1630 case SHADER_OPCODE_LOG2:
1631 case SHADER_OPCODE_SIN:
1632 case SHADER_OPCODE_COS:
1633 assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
1634 if (devinfo->gen >= 7) {
1635 gen6_math(p, dst, brw_math_function(inst->opcode), src[0],
1636 brw_null_reg());
1637 } else if (devinfo->gen == 6) {
1638 generate_math_gen6(p, inst, dst, src[0], brw_null_reg());
1639 } else {
1640 generate_math1_gen4(p, inst, dst, src[0]);
1641 }
1642 break;
1643
1644 case SHADER_OPCODE_POW:
1645 case SHADER_OPCODE_INT_QUOTIENT:
1646 case SHADER_OPCODE_INT_REMAINDER:
1647 assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
1648 if (devinfo->gen >= 7) {
1649 gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
1650 } else if (devinfo->gen == 6) {
1651 generate_math_gen6(p, inst, dst, src[0], src[1]);
1652 } else {
1653 generate_math2_gen4(p, inst, dst, src[0], src[1]);
1654 }
1655 break;
1656
1657 case SHADER_OPCODE_TEX:
1658 case SHADER_OPCODE_TXD:
1659 case SHADER_OPCODE_TXF:
1660 case SHADER_OPCODE_TXF_CMS:
1661 case SHADER_OPCODE_TXF_CMS_W:
1662 case SHADER_OPCODE_TXF_MCS:
1663 case SHADER_OPCODE_TXL:
1664 case SHADER_OPCODE_TXS:
1665 case SHADER_OPCODE_TG4:
1666 case SHADER_OPCODE_TG4_OFFSET:
1667 case SHADER_OPCODE_SAMPLEINFO:
1668 generate_tex(p, prog_data, inst, dst, src[0], src[1]);
1669 break;
1670
1671 case VS_OPCODE_URB_WRITE:
1672 generate_vs_urb_write(p, inst);
1673 break;
1674
1675 case SHADER_OPCODE_GEN4_SCRATCH_READ:
1676 generate_scratch_read(p, inst, dst, src[0]);
1677 break;
1678
1679 case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
1680 generate_scratch_write(p, inst, dst, src[0], src[1]);
1681 break;
1682
1683 case VS_OPCODE_PULL_CONSTANT_LOAD:
1684 generate_pull_constant_load(p, prog_data, inst, dst, src[0], src[1]);
1685 break;
1686
1687 case VS_OPCODE_PULL_CONSTANT_LOAD_GEN7:
1688 generate_pull_constant_load_gen7(p, prog_data, inst, dst, src[0], src[1]);
1689 break;
1690
1691 case VS_OPCODE_SET_SIMD4X2_HEADER_GEN9:
1692 generate_set_simd4x2_header_gen9(p, inst, dst);
1693 break;
1694
1695
1696 case VS_OPCODE_GET_BUFFER_SIZE:
1697 generate_get_buffer_size(p, prog_data, inst, dst, src[0], src[1]);
1698 break;

   case GS_OPCODE_URB_WRITE:
      generate_gs_urb_write(p, inst);
      break;

   case GS_OPCODE_URB_WRITE_ALLOCATE:
      generate_gs_urb_write_allocate(p, inst);
      break;

   case GS_OPCODE_SVB_WRITE:
      generate_gs_svb_write(p, prog_data, inst, dst, src[0], src[1]);
      break;

   case GS_OPCODE_SVB_SET_DST_INDEX:
      generate_gs_svb_set_destination_index(p, inst, dst, src[0]);
      break;

   case GS_OPCODE_THREAD_END:
      generate_gs_thread_end(p, inst);
      break;

   case GS_OPCODE_SET_WRITE_OFFSET:
      generate_gs_set_write_offset(p, dst, src[0], src[1]);
      break;

   case GS_OPCODE_SET_VERTEX_COUNT:
      generate_gs_set_vertex_count(p, dst, src[0]);
      break;

   case GS_OPCODE_FF_SYNC:
      generate_gs_ff_sync(p, inst, dst, src[0], src[1]);
      break;

   case GS_OPCODE_FF_SYNC_SET_PRIMITIVES:
      generate_gs_ff_sync_set_primitives(p, dst, src[0], src[1], src[2]);
      break;

   case GS_OPCODE_SET_PRIMITIVE_ID:
      generate_gs_set_primitive_id(p, dst);
      break;

   case GS_OPCODE_SET_DWORD_2:
      generate_gs_set_dword_2(p, dst, src[0]);
      break;

   case GS_OPCODE_PREPARE_CHANNEL_MASKS:
      generate_gs_prepare_channel_masks(p, dst);
      break;

   case GS_OPCODE_SET_CHANNEL_MASKS:
      generate_gs_set_channel_masks(p, dst, src[0]);
      break;

   case GS_OPCODE_GET_INSTANCE_ID:
      generate_gs_get_instance_id(p, dst);
      break;

   case SHADER_OPCODE_SHADER_TIME_ADD:
      brw_shader_time_add(p, src[0],
                          prog_data->base.binding_table.shader_time_start);
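      /* Record the binding table slot as used so the state upload code
       * accounts for it when sizing the binding table.
       */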
      brw_mark_surface_used(&prog_data->base,
                            prog_data->base.binding_table.shader_time_start);
      break;

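   /* The surface opcodes below take the message payload in src[0] and the
    * surface index in src[1]; src[2] is an immediate holding the atomic
    * opcode for atomics or the channel count for reads/writes.  Untyped
    * messages address raw/structured buffers (SSBOs, atomic counters);
    * typed messages go through format conversion (images).
    */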
   case SHADER_OPCODE_UNTYPED_ATOMIC:
      assert(src[2].file == BRW_IMMEDIATE_VALUE);
      brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
                         !inst->dst.is_null());
      break;

   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
      assert(src[2].file == BRW_IMMEDIATE_VALUE);
      brw_untyped_surface_read(p, dst, src[0], src[1], inst->mlen,
                               src[2].ud);
      break;

   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
      assert(src[2].file == BRW_IMMEDIATE_VALUE);
      brw_untyped_surface_write(p, src[0], src[1], inst->mlen,
                                src[2].ud);
      break;

   case SHADER_OPCODE_TYPED_ATOMIC:
      assert(src[2].file == BRW_IMMEDIATE_VALUE);
      brw_typed_atomic(p, dst, src[0], src[1], src[2].ud, inst->mlen,
                       !inst->dst.is_null());
      break;

   case SHADER_OPCODE_TYPED_SURFACE_READ:
      assert(src[2].file == BRW_IMMEDIATE_VALUE);
      brw_typed_surface_read(p, dst, src[0], src[1], inst->mlen,
                             src[2].ud);
      break;

   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
      assert(src[2].file == BRW_IMMEDIATE_VALUE);
      brw_typed_surface_write(p, src[0], src[1], inst->mlen,
                              src[2].ud);
      break;

   case SHADER_OPCODE_MEMORY_FENCE:
      brw_memory_fence(p, dst);
      break;

   case SHADER_OPCODE_FIND_LIVE_CHANNEL:
      brw_find_live_channel(p, dst);
      break;

   case SHADER_OPCODE_BROADCAST:
      brw_broadcast(p, dst, src[0], src[1]);
      break;

   case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
      generate_unpack_flags(p, dst);
      break;

   case VEC4_OPCODE_MOV_BYTES: {
      /* Moves the low byte from each channel, using an Align1 access mode
       * and a <4,1,0> source region.
       */
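      /* With vstride=4, width=1, hstride=0, execution channel i reads the
       * byte at offset 4*i, i.e. the least significant byte of each DWord.
       */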
      assert(src[0].type == BRW_REGISTER_TYPE_UB ||
             src[0].type == BRW_REGISTER_TYPE_B);

      brw_set_default_access_mode(p, BRW_ALIGN_1);
      src[0].vstride = BRW_VERTICAL_STRIDE_4;
      src[0].width = BRW_WIDTH_1;
      src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
      brw_MOV(p, dst, src[0]);
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      break;
   }

   case VEC4_OPCODE_PACK_BYTES: {
      /* Is effectively:
       *
       *   mov(8) dst<16,4,1>:UB src<4,1,0>:UB
       *
       * but the destination's only regioning parameter is the horizontal
       * stride, so instead we have to use two instructions:
       *
       *   mov(4) dst<1>:UB     src<4,1,0>:UB
       *   mov(4) dst.16<1>:UB  src.16<4,1,0>:UB
       *
       * which pack the four bytes from the low and the high four DWords.
       */
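      /* For example, writemask == WRITEMASK_Z gives offset == 2, so the
       * packed bytes land at byte offsets 8..11 and 24..27 of dst.
       */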
      assert(_mesa_is_pow_two(dst.writemask) &&
             dst.writemask != 0);
      unsigned offset = __builtin_ctz(dst.writemask);

      dst.type = BRW_REGISTER_TYPE_UB;

      brw_set_default_access_mode(p, BRW_ALIGN_1);

      src[0].type = BRW_REGISTER_TYPE_UB;
      src[0].vstride = BRW_VERTICAL_STRIDE_4;
      src[0].width = BRW_WIDTH_1;
      src[0].hstride = BRW_HORIZONTAL_STRIDE_0;
      dst.subnr = offset * 4;
      struct brw_inst *insn = brw_MOV(p, dst, src[0]);
      brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
      brw_inst_set_no_dd_clear(p->devinfo, insn, true);
      brw_inst_set_no_dd_check(p->devinfo, insn, inst->no_dd_check);

      src[0].subnr = 16;
      dst.subnr = 16 + offset * 4;
      insn = brw_MOV(p, dst, src[0]);
      brw_inst_set_exec_size(p->devinfo, insn, BRW_EXECUTE_4);
      brw_inst_set_no_dd_clear(p->devinfo, insn, inst->no_dd_clear);
      brw_inst_set_no_dd_check(p->devinfo, insn, true);

      brw_set_default_access_mode(p, BRW_ALIGN_16);
      break;
   }

   case TCS_OPCODE_URB_WRITE:
      generate_tcs_urb_write(p, inst, src[0]);
      break;

   case VEC4_OPCODE_URB_READ:
      generate_vec4_urb_read(p, inst, dst, src[0]);
      break;

   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
      generate_tcs_input_urb_offsets(p, dst, src[0], src[1]);
      break;

   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
      generate_tcs_output_urb_offsets(p, dst, src[0], src[1]);
      break;

   case TCS_OPCODE_GET_INSTANCE_ID:
      generate_tcs_get_instance_id(p, dst);
      break;

   case TCS_OPCODE_GET_PRIMITIVE_ID:
      generate_tcs_get_primitive_id(p, dst);
      break;

   case TCS_OPCODE_CREATE_BARRIER_HEADER:
      generate_tcs_create_barrier_header(p, prog_data, dst);
      break;

   case TES_OPCODE_CREATE_INPUT_READ_HEADER:
      generate_tes_create_input_read_header(p, dst);
      break;

   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
      generate_tes_add_indirect_urb_offset(p, dst, src[0], src[1]);
      break;

   case TES_OPCODE_GET_PRIMITIVE_ID:
      generate_tes_get_primitive_id(p, dst);
      break;

   case TCS_OPCODE_SRC0_010_IS_ZERO:
      /* If src_reg had stride like fs_reg, we wouldn't need this. */
      brw_MOV(p, brw_null_reg(), stride(src[0], 0, 1, 0));
      brw_inst_set_cond_modifier(devinfo, brw_last_inst, BRW_CONDITIONAL_Z);
      break;

   case TCS_OPCODE_RELEASE_INPUT:
      generate_tcs_release_input(p, dst, src[0], src[1]);
      break;

   case TCS_OPCODE_THREAD_END:
      generate_tcs_thread_end(p, inst);
      break;

   case SHADER_OPCODE_BARRIER:
      brw_barrier(p, src[0]);
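      /* Now stall on the notification register until all threads in the
       * thread group have reached the barrier.
       */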
      brw_WAIT(p);
      break;

   default:
      unreachable("Unsupported opcode");
   }

   if (inst->opcode == VEC4_OPCODE_PACK_BYTES) {
      /* Dependency hints were already applied above, when the two MOV
       * instructions for this opcode were emitted.
       */
      assert(!inst->conditional_mod);
   } else if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
      assert(p->nr_insn == pre_emit_nr_insn + 1 ||
             !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
              "emitting more than 1 instruction");

      brw_inst *last = &p->store[pre_emit_nr_insn];

      if (inst->conditional_mod)
         brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
      brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
      brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
   }
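
   /* The instruction stream is complete, so the UIP/JIP jump targets of
    * flow control instructions can now be patched in.
    */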
   brw_set_uip_jip(p);
   annotation_finalize(&annotation, p->next_insn_offset);

#ifndef NDEBUG
   bool validated = brw_validate_instructions(p, 0, &annotation);
#else
   if (unlikely(debug_flag))
      brw_validate_instructions(p, 0, &annotation);
#endif

   int before_size = p->next_insn_offset;
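   /* Each native instruction is 16 bytes before compaction (hence the
    * before_size / 16 instruction count below); compaction rewrites
    * eligible instructions into the 8-byte compact encoding.
    */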
   brw_compact_instructions(p, 0, annotation.ann_count, annotation.ann);
   int after_size = p->next_insn_offset;

   if (unlikely(debug_flag)) {
      fprintf(stderr, "Native code for %s %s shader %s:\n",
              nir->info.label ? nir->info.label : "unnamed",
              _mesa_shader_stage_to_string(nir->stage), nir->info.name);

      fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles. "
              "Compacted %d to %d bytes (%.0f%%)\n",
              stage_abbrev, before_size / 16, loop_count, cfg->cycle_count,
              before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, annotation.ann_count, annotation.ann,
                    p->devinfo);
      ralloc_free(annotation.mem_ctx);
   }
   assert(validated);

   compiler->shader_debug_log(log_data,
                              "%s vec4 shader: %d inst, %d loops, %u cycles, "
                              "compacted %d to %d bytes.\n",
                              stage_abbrev, before_size / 16,
                              loop_count, cfg->cycle_count,
                              before_size, after_size);
}
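
/* Entry point for the vec4 back-end: create the codegen context (vec4
 * programs use Align16 access mode by default), emit native code for the
 * CFG, and return the assembly.  The returned buffer is allocated out of
 * mem_ctx, and its size in bytes is written to *out_assembly_size.
 */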
extern "C" const unsigned *
brw_vec4_generate_assembly(const struct brw_compiler *compiler,
                           void *log_data,
                           void *mem_ctx,
                           const nir_shader *nir,
                           struct brw_vue_prog_data *prog_data,
                           const struct cfg_t *cfg,
                           unsigned *out_assembly_size)
{
   struct brw_codegen *p = rzalloc(mem_ctx, struct brw_codegen);
   brw_init_codegen(compiler->devinfo, p, mem_ctx);
   brw_set_default_access_mode(p, BRW_ALIGN_16);

   generate_code(p, compiler, log_data, nir, prog_data, cfg);

   return brw_get_program(p, out_assembly_size);
}