/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file brw_vec4_gs_visitor.cpp
 *
 * Geometry-shader-specific code derived from the vec4_visitor class.
 */

#include "brw_vec4_gs_visitor.h"
#include "gen6_gs_visitor.h"
#include "brw_fs.h"
#include "brw_nir.h"

namespace brw {

vec4_gs_visitor::vec4_gs_visitor(const struct brw_compiler *compiler,
                                 void *log_data,
                                 struct brw_gs_compile *c,
                                 struct brw_gs_prog_data *prog_data,
                                 const nir_shader *shader,
                                 void *mem_ctx,
                                 bool no_spills,
                                 int shader_time_index)
   : vec4_visitor(compiler, log_data, &c->key.tex,
                  &prog_data->base, shader, mem_ctx,
                  no_spills, shader_time_index),
     c(c),
     gs_prog_data(prog_data)
{
}


dst_reg *
vec4_gs_visitor::make_reg_for_system_value(int location)
{
   dst_reg *reg = new(mem_ctx) dst_reg(this, glsl_type::int_type);

   switch (location) {
   case SYSTEM_VALUE_INVOCATION_ID:
      this->current_annotation = "initialize gl_InvocationID";
      if (gs_prog_data->invocations > 1)
         emit(GS_OPCODE_GET_INSTANCE_ID, *reg);
      else
         emit(MOV(*reg, brw_imm_ud(0)));
      break;
   default:
      unreachable("not reached");
   }

   return reg;
}


int
vec4_gs_visitor::setup_varying_inputs(int payload_reg, int *attribute_map,
                                      int attributes_per_reg)
{
   /* For geometry shaders there are N copies of the input attributes, where N
    * is the number of input vertices. attribute_map[BRW_VARYING_SLOT_COUNT *
    * i + j] represents attribute j for vertex i.
    *
    * Note that GS inputs are read from the VUE 256 bits (2 vec4's) at a time,
    * so the total number of input slots that will be delivered to the GS (and
    * thus the stride of the input arrays) is urb_read_length * 2.
    */
   const unsigned num_input_vertices = nir->info->gs.vertices_in;
   assert(num_input_vertices <= MAX_GS_INPUT_VERTICES);
   unsigned input_array_stride = prog_data->urb_read_length * 2;

   for (int slot = 0; slot < c->input_vue_map.num_slots; slot++) {
      int varying = c->input_vue_map.slot_to_varying[slot];
      for (unsigned vertex = 0; vertex < num_input_vertices; vertex++) {
         attribute_map[BRW_VARYING_SLOT_COUNT * vertex + varying] =
            attributes_per_reg * payload_reg + input_array_stride * vertex +
            slot;
      }
   }

   int regs_used = ALIGN(input_array_stride * num_input_vertices,
                         attributes_per_reg) / attributes_per_reg;
   return payload_reg + regs_used;
}


void
vec4_gs_visitor::setup_payload()
{
   int attribute_map[BRW_VARYING_SLOT_COUNT * MAX_GS_INPUT_VERTICES];

   /* If we are in dual instanced or single mode, then attributes are going
    * to be interleaved, so one register contains two attribute slots.
    */
   int attributes_per_reg =
      prog_data->dispatch_mode == DISPATCH_MODE_4X2_DUAL_OBJECT ? 1 : 2;

   /* If a geometry shader tries to read from an input that wasn't written by
    * the vertex shader, that produces undefined results, but it shouldn't
    * crash anything. So initialize attribute_map to zeros--that ensures that
    * these undefined results are read from r0.
    */
   memset(attribute_map, 0, sizeof(attribute_map));

   int reg = 0;

   /* The payload always contains important data in r0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.
    */
   reg++;

   /* If the shader uses gl_PrimitiveIDIn, that goes in r1. */
   if (gs_prog_data->include_primitive_id)
      attribute_map[VARYING_SLOT_PRIMITIVE_ID] = attributes_per_reg * reg++;

   reg = setup_uniforms(reg);

   reg = setup_varying_inputs(reg, attribute_map, attributes_per_reg);

   lower_attributes_to_hw_regs(attribute_map, attributes_per_reg > 1);

   this->first_non_payload_grf = reg;
}


void
vec4_gs_visitor::emit_prolog()
{
   /* In vertex shaders, r0.2 is guaranteed to be initialized to zero. In
    * geometry shaders, it isn't (it contains a bunch of information we don't
    * need, like the input primitive type). We need r0.2 to be zero in order
    * to build scratch read/write messages correctly (otherwise this value
    * will be interpreted as a global offset, causing us to do our scratch
    * reads/writes to garbage memory). So just set it to zero at the top of
    * the shader.
    */
   this->current_annotation = "clear r0.2";
   dst_reg r0(retype(brw_vec4_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(GS_OPCODE_SET_DWORD_2, r0, brw_imm_ud(0u));
   inst->force_writemask_all = true;

   /* Create a virtual register to hold the vertex count */
   this->vertex_count = src_reg(this, glsl_type::uint_type);

   /* Initialize the vertex_count register to 0 */
   this->current_annotation = "initialize vertex_count";
   inst = emit(MOV(dst_reg(this->vertex_count), brw_imm_ud(0u)));
   inst->force_writemask_all = true;

   if (c->control_data_header_size_bits > 0) {
      /* Create a virtual register to hold the current set of control data
       * bits.
       */
      this->control_data_bits = src_reg(this, glsl_type::uint_type);

      /* If we're outputting more than 32 control data bits, then EmitVertex()
       * will set control_data_bits to 0 after emitting the first vertex.
       * Otherwise, we need to initialize it to 0 here.
       */
      if (c->control_data_header_size_bits <= 32) {
         this->current_annotation = "initialize control data bits";
         inst = emit(MOV(dst_reg(this->control_data_bits), brw_imm_ud(0u)));
         inst->force_writemask_all = true;
      }
   }

   this->current_annotation = NULL;
}

void
vec4_gs_visitor::emit_thread_end()
{
   if (c->control_data_header_size_bits > 0) {
      /* During shader execution, we only ever call emit_control_data_bits()
       * just prior to outputting a vertex. Therefore, the control data bits
       * corresponding to the most recently output vertex still need to be
       * emitted.
       */
      current_annotation = "thread end: emit control data bits";
      emit_control_data_bits();
   }

   /* MRF 0 is reserved for the debugger, so start with message header
    * in MRF 1.
    */
   int base_mrf = 1;

   bool static_vertex_count = gs_prog_data->static_vertex_count != -1;

   /* If the previous instruction was a URB write, we don't need to issue
    * a second one - we can just set the EOT bit on the previous write.
    *
    * Skip this on Gen8+ unless there's a static vertex count, as we also
    * need to write the vertex count out, and combining the two may not be
    * possible (or at least not straightforward).
    */
   vec4_instruction *last = (vec4_instruction *) instructions.get_tail();
   if (last && last->opcode == GS_OPCODE_URB_WRITE &&
       !(INTEL_DEBUG & DEBUG_SHADER_TIME) &&
       devinfo->gen >= 8 && static_vertex_count) {
      last->urb_write_flags = BRW_URB_WRITE_EOT | last->urb_write_flags;
      return;
   }

   current_annotation = "thread end";
   dst_reg mrf_reg(MRF, base_mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;
   if (devinfo->gen < 8 || !static_vertex_count)
      emit(GS_OPCODE_SET_VERTEX_COUNT, mrf_reg, this->vertex_count);
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      emit_shader_time_end();
   inst = emit(GS_OPCODE_THREAD_END);
   inst->base_mrf = base_mrf;
   inst->mlen = devinfo->gen >= 8 && !static_vertex_count ? 2 : 1;
}


void
vec4_gs_visitor::emit_urb_write_header(int mrf)
{
   /* The SEND instruction that writes the vertex data to the VUE will use
    * per_slot_offset=true, which means that DWORDs 3 and 4 of the message
    * header specify an offset (in multiples of 256 bits) into the URB entry
    * at which the write should take place.
    *
    * So we have to prepare a message header with the appropriate offset
    * values.
    */
   dst_reg mrf_reg(MRF, mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   this->current_annotation = "URB write header";
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;
   emit(GS_OPCODE_SET_WRITE_OFFSET, mrf_reg, this->vertex_count,
        brw_imm_ud(gs_prog_data->output_vertex_size_hwords));
}


vec4_instruction *
vec4_gs_visitor::emit_urb_write_opcode(bool complete)
{
   /* We don't care whether the vertex is complete, because in general
    * geometry shaders output multiple vertices, and we don't terminate the
    * thread until all vertices are complete.
    */
   (void) complete;

   vec4_instruction *inst = emit(GS_OPCODE_URB_WRITE);
   inst->offset = gs_prog_data->control_data_header_size_hwords;

   /* We need to increment Global Offset by 1 to make room for Broadwell's
    * extra "Vertex Count" payload at the beginning of the URB entry.
    */
   if (devinfo->gen >= 8 && gs_prog_data->static_vertex_count == -1)
      inst->offset++;

   inst->urb_write_flags = BRW_URB_WRITE_PER_SLOT_OFFSET;
   return inst;
}


/**
 * Write out a batch of 32 control data bits from the control_data_bits
 * register to the URB.
 *
 * The current value of the vertex_count register determines which DWORD in
 * the URB receives the control data bits. The control_data_bits register is
 * assumed to contain the correct data for the vertex that was most recently
 * output, and all previous vertices that share the same DWORD.
 *
 * This function takes care of ensuring that if no vertices have been output
 * yet, no control bits are emitted.
 */
void
vec4_gs_visitor::emit_control_data_bits()
{
   assert(c->control_data_bits_per_vertex != 0);

   /* Since the URB_WRITE_OWORD message operates with 128-bit (vec4 sized)
    * granularity, we need to use two tricks to ensure that the batch of 32
    * control data bits is written to the appropriate DWORD in the URB. To
    * select which vec4 we are writing to, we use the "slot {0,1} offset"
    * fields of the message header. To select which DWORD in the vec4 we are
    * writing to, we use the channel mask fields of the message header. To
    * avoid penalizing geometry shaders that emit a small number of vertices
    * with extra bookkeeping, we only do each of these tricks when
    * c->control_data_header_size_bits is large enough to make it necessary.
    *
    * Note: this means that if we're outputting just a single DWORD of control
    * data bits, we'll actually replicate it four times since we won't do any
    * channel masking. But that's not a problem since in this case the
    * hardware only pays attention to the first DWORD.
    */
   enum brw_urb_write_flags urb_write_flags = BRW_URB_WRITE_OWORD;
   if (c->control_data_header_size_bits > 32)
      urb_write_flags = urb_write_flags | BRW_URB_WRITE_USE_CHANNEL_MASKS;
   if (c->control_data_header_size_bits > 128)
      urb_write_flags = urb_write_flags | BRW_URB_WRITE_PER_SLOT_OFFSET;

   /* If we are using either channel masks or a per-slot offset, then we
    * need to figure out which DWORD we are trying to write to, using the
    * formula:
    *
    *     dword_index = (vertex_count - 1) * bits_per_vertex / 32
    *
    * Since bits_per_vertex is a power of two, and is known at compile
    * time, this can be optimized to:
    *
    *     dword_index = (vertex_count - 1) >> (6 - log2(bits_per_vertex))
    */
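   /* A worked example with illustrative numbers (not taken from any real
    * shader): if bits_per_vertex == 2, util_last_bit(2) == 2, so the shift
    * below is 6 - 2 == 4. For vertex_count == 18 we get prev_count == 17
    * and dword_index == 17 >> 4 == 1, matching (18 - 1) * 2 / 32 == 1.
    */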
   src_reg dword_index(this, glsl_type::uint_type);
   if (urb_write_flags) {
      src_reg prev_count(this, glsl_type::uint_type);
      emit(ADD(dst_reg(prev_count), this->vertex_count,
               brw_imm_ud(0xffffffffu)));
      unsigned log2_bits_per_vertex =
         util_last_bit(c->control_data_bits_per_vertex);
      emit(SHR(dst_reg(dword_index), prev_count,
               brw_imm_ud(6 - log2_bits_per_vertex)));
   }

   /* Start building the URB write message. The first MRF gets a copy of
    * R0.
    */
   int base_mrf = 1;
   dst_reg mrf_reg(MRF, base_mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;

   if (urb_write_flags & BRW_URB_WRITE_PER_SLOT_OFFSET) {
      /* Set the per-slot offset to dword_index / 4, so that we'll write to
       * the appropriate OWORD within the control data header.
       */
      src_reg per_slot_offset(this, glsl_type::uint_type);
      emit(SHR(dst_reg(per_slot_offset), dword_index, brw_imm_ud(2u)));
      emit(GS_OPCODE_SET_WRITE_OFFSET, mrf_reg, per_slot_offset,
           brw_imm_ud(1u));
   }

   if (urb_write_flags & BRW_URB_WRITE_USE_CHANNEL_MASKS) {
      /* Set the channel masks to 1 << (dword_index % 4), so that we'll
       * write to the appropriate DWORD within the OWORD. We need to do
       * this computation with force_writemask_all, otherwise garbage data
       * from invocation 0 might clobber the mask for invocation 1 when
       * GS_OPCODE_PREPARE_CHANNEL_MASKS tries to OR the two masks
       * together.
       */
      src_reg channel(this, glsl_type::uint_type);
      inst = emit(AND(dst_reg(channel), dword_index, brw_imm_ud(3u)));
      inst->force_writemask_all = true;
      src_reg one(this, glsl_type::uint_type);
      inst = emit(MOV(dst_reg(one), brw_imm_ud(1u)));
      inst->force_writemask_all = true;
      src_reg channel_mask(this, glsl_type::uint_type);
      inst = emit(SHL(dst_reg(channel_mask), one, channel));
      inst->force_writemask_all = true;
      emit(GS_OPCODE_PREPARE_CHANNEL_MASKS, dst_reg(channel_mask),
           channel_mask);
      emit(GS_OPCODE_SET_CHANNEL_MASKS, mrf_reg, channel_mask);
   }

   /* Store the control data bits in the message payload and send it. */
   dst_reg mrf_reg2(MRF, base_mrf + 1);
   inst = emit(MOV(mrf_reg2, this->control_data_bits));
   inst->force_writemask_all = true;
   inst = emit(GS_OPCODE_URB_WRITE);
   inst->urb_write_flags = urb_write_flags;
   /* We need to increment Global Offset by 256 bits to make room for
    * Broadwell's extra "Vertex Count" payload at the beginning of the
    * URB entry. Since this is an OWord message, Global Offset is counted
    * in 128-bit units, so we must set it to 2.
    */
   if (devinfo->gen >= 8 && gs_prog_data->static_vertex_count == -1)
      inst->offset = 2;
   inst->base_mrf = base_mrf;
   inst->mlen = 2;
}

void
vec4_gs_visitor::set_stream_control_data_bits(unsigned stream_id)
{
   /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */

   /* Note: we are calling this *before* increasing vertex_count, so
    * this->vertex_count == vertex_count - 1 in the formula above.
    */

   /* Stream mode uses 2 bits per vertex */
   assert(c->control_data_bits_per_vertex == 2);

   /* Must be a valid stream */
   assert(stream_id >= 0 && stream_id < MAX_VERTEX_STREAMS);

   /* Control data bits are initialized to 0 so we don't have to set any
    * bits when sending vertices to stream 0.
    */
   if (stream_id == 0)
      return;

   /* reg::sid = stream_id */
   src_reg sid(this, glsl_type::uint_type);
   emit(MOV(dst_reg(sid), brw_imm_ud(stream_id)));

   /* reg:shift_count = 2 * (vertex_count - 1) */
   src_reg shift_count(this, glsl_type::uint_type);
   emit(SHL(dst_reg(shift_count), this->vertex_count, brw_imm_ud(1u)));

   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
    * stream_id << ((2 * (vertex_count - 1)) % 32).
    */
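   /* Illustrative example (hypothetical values): when this is called for
    * stream_id == 2 with this->vertex_count == 3 (i.e. the fourth vertex),
    * shift_count == 6 and mask == 2 << 6 == 0x80, so that vertex's stream ID
    * lands in bits 7:6 of control_data_bits.
    */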
   src_reg mask(this, glsl_type::uint_type);
   emit(SHL(dst_reg(mask), sid, shift_count));
   emit(OR(dst_reg(this->control_data_bits), this->control_data_bits, mask));
}

void
vec4_gs_visitor::gs_emit_vertex(int stream_id)
{
   this->current_annotation = "emit vertex: safety check";

   /* Haswell and later hardware ignores the "Render Stream Select" bits
    * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
    * and instead sends all primitives down the pipeline for rasterization.
    * If the SOL stage is enabled, "Render Stream Select" is honored and
    * primitives bound to non-zero streams are discarded after stream output.
    *
    * Since the only purpose of primitives sent to non-zero streams is to
    * be recorded by transform feedback, we can simply discard all geometry
    * bound to these streams when transform feedback is disabled.
    */
   if (stream_id > 0 && !nir->info->has_transform_feedback_varyings)
      return;

   /* If we're outputting 32 control data bits or fewer, then we can wait
    * until the shader is over to output them all. Otherwise we need to
    * output them as we go. Now is the time to do it, since we're about to
    * output the vertex_count'th vertex, so it's guaranteed that the
    * control data bits associated with the (vertex_count - 1)th vertex are
    * correct.
    */
   if (c->control_data_header_size_bits > 32) {
      this->current_annotation = "emit vertex: emit control data bits";
      /* Only emit control data bits if we've finished accumulating a batch
       * of 32 bits. This is the case when:
       *
       *     (vertex_count * bits_per_vertex) % 32 == 0
       *
       * (in other words, when the last 5 bits of vertex_count *
       * bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
       * integer n (which is always the case, since bits_per_vertex is
       * always 1 or 2), this is equivalent to requiring that the last 5-n
       * bits of vertex_count are 0:
       *
       *     vertex_count & (2^(5-n) - 1) == 0
       *
       * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
       * equivalent to:
       *
       *     vertex_count & (32 / bits_per_vertex - 1) == 0
       */
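      /* For instance (illustrative numbers only): with cut bits,
       * bits_per_vertex == 1, the immediate below is 32 / 1 - 1 == 31, and
       * the batch is flushed whenever (vertex_count & 31) == 0. With stream
       * IDs, bits_per_vertex == 2, the immediate is 15, and the batch is
       * flushed every 16 vertices (16 * 2 == 32 bits).
       */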
      vec4_instruction *inst =
         emit(AND(dst_null_ud(), this->vertex_count,
                  brw_imm_ud(32 / c->control_data_bits_per_vertex - 1)));
      inst->conditional_mod = BRW_CONDITIONAL_Z;

      emit(IF(BRW_PREDICATE_NORMAL));
      {
         /* If vertex_count is 0, then no control data bits have been
          * accumulated yet, so we skip emitting them.
          */
         emit(CMP(dst_null_ud(), this->vertex_count, brw_imm_ud(0u),
                  BRW_CONDITIONAL_NEQ));
         emit(IF(BRW_PREDICATE_NORMAL));
         emit_control_data_bits();
         emit(BRW_OPCODE_ENDIF);

         /* Reset control_data_bits to 0 so we can start accumulating a new
          * batch.
          *
          * Note: in the case where vertex_count == 0, this neutralizes the
          * effect of any call to EndPrimitive() that the shader may have
          * made before outputting its first vertex.
          */
         inst = emit(MOV(dst_reg(this->control_data_bits), brw_imm_ud(0u)));
         inst->force_writemask_all = true;
      }
      emit(BRW_OPCODE_ENDIF);
   }

   this->current_annotation = "emit vertex: vertex data";
   emit_vertex();

   /* In stream mode we have to set control data bits for all vertices
    * unless we have disabled control data bits completely (which we do
    * for GL_POINTS outputs that don't use streams).
    */
   if (c->control_data_header_size_bits > 0 &&
       gs_prog_data->control_data_format ==
          GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
      this->current_annotation = "emit vertex: Stream control data bits";
      set_stream_control_data_bits(stream_id);
   }

   this->current_annotation = NULL;
}

void
vec4_gs_visitor::gs_end_primitive()
{
   /* We can only do EndPrimitive() functionality when the control data
    * consists of cut bits. Fortunately, the only time it isn't is when the
    * output type is points, in which case EndPrimitive() is a no-op.
    */
   if (gs_prog_data->control_data_format !=
       GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
      return;
   }

   if (c->control_data_header_size_bits == 0)
      return;

   /* Cut bits use one bit per vertex. */
   assert(c->control_data_bits_per_vertex == 1);

   /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
    * vertex n, 0 otherwise. So all we need to do here is mark bit
    * (vertex_count - 1) % 32 in the cut_bits register to indicate that
    * EndPrimitive() was called after emitting vertex (vertex_count - 1);
    * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
    *
    * Note that if EndPrimitive() is called before emitting any vertices, this
    * will cause us to set bit 31 of the control_data_bits register to 1.
    * That's fine because:
    *
    * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
    *   output, so the hardware will ignore cut bit 31.
    *
    * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
    *   last vertex, so setting cut bit 31 has no effect (since the primitive
    *   is automatically ended when the GS terminates).
    *
    * - If max_vertices > 32, then the gs_emit_vertex() code will reset the
    *   control_data_bits register to 0 when the first vertex is emitted.
    */

   /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
   src_reg one(this, glsl_type::uint_type);
   emit(MOV(dst_reg(one), brw_imm_ud(1u)));
   src_reg prev_count(this, glsl_type::uint_type);
   emit(ADD(dst_reg(prev_count), this->vertex_count, brw_imm_ud(0xffffffffu)));
   src_reg mask(this, glsl_type::uint_type);
   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
    * ((vertex_count - 1) % 32).
    */
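   /* As an illustration (made-up numbers): if EndPrimitive() is called right
    * after emitting the fifth vertex, vertex_count == 5, prev_count == 4,
    * and mask == 1 << 4, i.e. cut bit 4 is set for vertex 4 (zero-based).
    */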
   emit(SHL(dst_reg(mask), one, prev_count));
   emit(OR(dst_reg(this->control_data_bits), this->control_data_bits, mask));
}

extern "C" const unsigned *
brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_gs_prog_key *key,
               struct brw_gs_prog_data *prog_data,
               const nir_shader *src_shader,
               struct gl_shader_program *shader_prog,
               int shader_time_index,
               unsigned *final_assembly_size,
               char **error_str)
{
   struct brw_gs_compile c;
   memset(&c, 0, sizeof(c));
   c.key = *key;

   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_GEOMETRY];
   nir_shader *shader = nir_shader_clone(mem_ctx, src_shader);

   /* The GLSL linker will have already matched up GS inputs and the outputs
    * of prior stages. The driver does extend VS outputs in some cases, but
    * only for legacy OpenGL or Gen4-5 hardware, neither of which offer
    * geometry shader support. So we can safely ignore that.
    *
    * For SSO pipelines, we use a fixed VUE map layout based on variable
    * locations, so we can rely on rendezvous-by-location making this work.
    *
    * However, we need to ignore VARYING_SLOT_PRIMITIVE_ID, as it's not
    * written by previous stages and shows up via payload magic.
    */
   GLbitfield64 inputs_read =
      shader->info->inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
   brw_compute_vue_map(compiler->devinfo,
                       &c.input_vue_map, inputs_read,
                       shader->info->separate_shader);

   shader = brw_nir_apply_sampler_key(shader, compiler->devinfo, &key->tex,
                                      is_scalar);
   brw_nir_lower_vue_inputs(shader, is_scalar, &c.input_vue_map);
   brw_nir_lower_vue_outputs(shader, is_scalar);
   shader = brw_postprocess_nir(shader, compiler->devinfo, is_scalar);

   prog_data->include_primitive_id =
      (shader->info->inputs_read & VARYING_BIT_PRIMITIVE_ID) != 0;

   prog_data->invocations = shader->info->gs.invocations;

   if (compiler->devinfo->gen >= 8)
      prog_data->static_vertex_count = nir_gs_count_vertices(shader);

   if (compiler->devinfo->gen >= 7) {
      if (shader->info->gs.output_primitive == GL_POINTS) {
         /* When the output type is points, the geometry shader may output data
          * to multiple streams, and EndPrimitive() has no effect. So we
          * configure the hardware to interpret the control data as stream ID.
          */
         prog_data->control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;

         /* We only have to emit control bits if we are using streams */
         if (shader_prog && shader_prog->Geom.UsesStreams)
            c.control_data_bits_per_vertex = 2;
         else
            c.control_data_bits_per_vertex = 0;
      } else {
         /* When the output type is triangle_strip or line_strip, EndPrimitive()
          * may be used to terminate the current strip and start a new one
          * (similar to primitive restart), and outputting data to multiple
          * streams is not supported. So we configure the hardware to interpret
          * the control data as EndPrimitive information (a.k.a. "cut bits").
          */
         prog_data->control_data_format = GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT;

         /* We only need to output control data if the shader actually calls
          * EndPrimitive().
          */
         c.control_data_bits_per_vertex =
            shader->info->gs.uses_end_primitive ? 1 : 0;
      }
   } else {
      /* There are no control data bits in gen6. */
      c.control_data_bits_per_vertex = 0;

      /* If it is using transform feedback, enable it */
      if (shader->info->has_transform_feedback_varyings)
         prog_data->gen6_xfb_enabled = true;
      else
         prog_data->gen6_xfb_enabled = false;
   }

   c.control_data_header_size_bits =
      shader->info->gs.vertices_out * c.control_data_bits_per_vertex;

   /* 1 HWORD = 32 bytes = 256 bits */
   prog_data->control_data_header_size_hwords =
      ALIGN(c.control_data_header_size_bits, 256) / 256;
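   /* For example (illustrative values): a shader with max_vertices = 100
    * using cut bits (1 bit per vertex) needs 100 header bits, which rounds
    * up to ALIGN(100, 256) / 256 = 1 HWORD of control data.
    */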

   /* Compute the output vertex size.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 STATE_GS - Output Vertex
    * Size (p168):
    *
    *     [0,62] indicating [1,63] 16B units
    *
    *     Specifies the size of each vertex stored in the GS output entry
    *     (following any Control Header data) as a number of 128-bit units
    *     (minus one).
    *
    *     Programming Restrictions: The vertex size must be programmed as a
    *     multiple of 32B units with the following exception: Rendering is
    *     disabled (as per SOL stage state) and the vertex size output by the
    *     GS thread is 16B.
    *
    *     If rendering is enabled (as per SOL state) the vertex size must be
    *     programmed as a multiple of 32B units. In other words, the only time
    *     software can program a vertex size with an odd number of 16B units
    *     is when rendering is disabled.
    *
    * Note: B=bytes in the above text.
    *
    * It doesn't seem worth the extra trouble to optimize the case where the
    * vertex size is 16B (especially since this would require special-casing
    * the GEN assembly that writes to the URB). So we just set the vertex
    * size to a multiple of 32B (2 vec4's) in all cases.
    *
    * The maximum output vertex size is 62*16 = 992 bytes (31 hwords). We
    * budget that as follows:
    *
    *   512 bytes for varyings (a varying component is 4 bytes and
    *             gl_MaxGeometryOutputComponents = 128)
    *    16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *             bytes)
    *    16 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *             even if it's not used)
    *    32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *             whenever clip planes are enabled, even if the shader doesn't
    *             write to gl_ClipDistance)
    *    16 bytes overhead since the VUE size must be a multiple of 32 bytes
    *             (see below)--this causes up to 1 VUE slot to be wasted
    *   400 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
    * per interpolation type, so this is plenty.
    */
   unsigned output_vertex_size_bytes = prog_data->base.vue_map.num_slots * 16;
   assert(compiler->devinfo->gen == 6 ||
          output_vertex_size_bytes <= GEN7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   prog_data->output_vertex_size_hwords =
      ALIGN(output_vertex_size_bytes, 32) / 32;
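   /* As a made-up example: a VUE map with 5 slots gives
    * output_vertex_size_bytes = 5 * 16 = 80, which pads to
    * ALIGN(80, 32) / 32 = 3 HWORDs (96 bytes) per output vertex.
    */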

   /* Compute URB entry size. The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     64 bytes for the control data header (cut indices or StreamID bits)
    *   4096 bytes for varyings (a varying component is 4 bytes and
    *              gl_MaxGeometryTotalOutputComponents = 1024)
    *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *              bytes/vertex and gl_MaxGeometryOutputVertices is 256)
    *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *              even if it's not used)
    *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *              whenever clip planes are enabled, even if the shader doesn't
    *              write to gl_ClipDistance)
    *   4096 bytes overhead since the VUE size must be a multiple of 32
    *              bytes (see above)--this causes up to 1 VUE slot to be wasted
    *   8128 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot per
    * interpolation type, which works out to 3072 bytes, so this would allow
    * us to accommodate 2 interpolation types without any danger of running
    * out of URB space.
    *
    * In practice, the risk of running out of URB space is very small, since
    * the above figures are all worst-case, and most of them scale with the
    * number of output vertices. So we'll just calculate the amount of space
    * we need, and if it's too large, fail to compile.
    *
    * The above is for gen7+ where we have a single URB entry that will hold
    * all the output. In gen6, we will have to allocate URB entries for every
    * vertex we emit, so our URB entries only need to be large enough to hold
    * a single vertex. Also, gen6 does not have a control data header.
    */
   unsigned output_size_bytes;
   if (compiler->devinfo->gen >= 7) {
      output_size_bytes =
         prog_data->output_vertex_size_hwords * 32 * shader->info->gs.vertices_out;
      output_size_bytes += 32 * prog_data->control_data_header_size_hwords;
   } else {
      output_size_bytes = prog_data->output_vertex_size_hwords * 32;
   }

   /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
    * which comes before the control header.
    */
   if (compiler->devinfo->gen >= 8)
      output_size_bytes += 32;

   assert(output_size_bytes >= 1);
   unsigned max_output_size_bytes = GEN7_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (compiler->devinfo->gen == 6)
      max_output_size_bytes = GEN6_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (output_size_bytes > max_output_size_bytes)
      return NULL;

   /* URB entry sizes are stored as a multiple of 64 bytes in gen7+ and
    * a multiple of 128 bytes in gen6.
    */
   if (compiler->devinfo->gen >= 7)
      prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
   else
      prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;
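   /* Putting the pieces together for a hypothetical Gen8 shader with
    * output_vertex_size_hwords = 3, max_vertices = 4 and a 1-HWORD control
    * data header: output_size_bytes = 3 * 32 * 4 + 32 + 32 = 448, so
    * urb_entry_size = ALIGN(448, 64) / 64 = 7.
    */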

   prog_data->output_topology =
      get_hw_prim_for_gl_prim(shader->info->gs.output_primitive);

   prog_data->vertices_in = shader->info->gs.vertices_in;

   /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
    * need to program a URB read length of ceiling(num_slots / 2).
    */
   prog_data->base.urb_read_length = (c.input_vue_map.num_slots + 1) / 2;
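   /* E.g. (hypothetical input VUE map): 5 input slots require a URB read
    * length of (5 + 1) / 2 = 3, i.e. three 256-bit reads per input vertex.
    */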

   /* Now that prog_data setup is done, we are ready to actually compile the
    * program.
    */
   if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
      fprintf(stderr, "GS Input ");
      brw_print_vue_map(stderr, &c.input_vue_map);
      fprintf(stderr, "GS Output ");
      brw_print_vue_map(stderr, &prog_data->base.vue_map);
   }

   if (is_scalar) {
      fs_visitor v(compiler, log_data, mem_ctx, &c, prog_data, shader,
                   shader_time_index);
      if (v.run_gs()) {
         prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
         prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;

         fs_generator g(compiler, log_data, mem_ctx, &c.key,
                        &prog_data->base.base, v.promoted_constants,
                        false, MESA_SHADER_GEOMETRY);
         if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
            const char *label =
               shader->info->label ? shader->info->label : "unnamed";
            char *name = ralloc_asprintf(mem_ctx, "%s geometry shader %s",
                                         label, shader->info->name);
            g.enable_debug(name);
         }
         g.generate_code(v.cfg, 8);
         return g.get_assembly(final_assembly_size);
      }
   }

   if (compiler->devinfo->gen >= 7) {
      /* Compile the geometry shader in DUAL_OBJECT dispatch mode, if we can
       * do so without spilling. If the GS invocations count > 1, then we
       * can't use dual object mode.
       */
      if (prog_data->invocations <= 1 &&
          likely(!(INTEL_DEBUG & DEBUG_NO_DUAL_OBJECT_GS))) {
         prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

         vec4_gs_visitor v(compiler, log_data, &c, prog_data, shader,
                           mem_ctx, true /* no_spills */, shader_time_index);
         if (v.run()) {
            return brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
                                              shader, &prog_data->base, v.cfg,
                                              final_assembly_size);
         }
      }
   }

   /* Either we failed to compile in DUAL_OBJECT mode (probably because it
    * would have required spilling) or DUAL_OBJECT mode is disabled. So fall
    * back to DUAL_INSTANCED or SINGLE mode, which consumes fewer registers.
    *
    * FIXME: Single dispatch mode requires that the driver can handle
    * interleaving of input registers, but this is already supported (dual
    * instance mode has the same requirement). However, to take full advantage
    * of single dispatch mode to reduce register pressure we would also need to
    * do interleaved outputs, but currently, the vec4 visitor and generator
    * classes do not support this, so at the moment register pressure in
    * single and dual instance modes is the same.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 "3DSTATE_GS":
    *
    *     "If InstanceCount>1, DUAL_OBJECT mode is invalid. Software will
    *      likely want to use DUAL_INSTANCE mode for higher performance, but
    *      SINGLE mode is also supported. When InstanceCount=1 (one instance
    *      per object) software can decide which dispatch mode to use.
    *      DUAL_OBJECT mode would likely be the best choice for performance,
    *      followed by SINGLE mode."
    *
    * So SINGLE mode is more performant when invocations == 1 and
    * DUAL_INSTANCE mode is more performant when invocations > 1. Gen6 only
    * supports SINGLE mode.
    */
   if (prog_data->invocations <= 1 || compiler->devinfo->gen < 7)
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X1_SINGLE;
   else
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_INSTANCE;

   vec4_gs_visitor *gs = NULL;
   const unsigned *ret = NULL;

   if (compiler->devinfo->gen >= 7)
      gs = new vec4_gs_visitor(compiler, log_data, &c, prog_data,
                               shader, mem_ctx, false /* no_spills */,
                               shader_time_index);
   else
      gs = new gen6_gs_visitor(compiler, log_data, &c, prog_data, shader_prog,
                               shader, mem_ctx, false /* no_spills */,
                               shader_time_index);

   if (!gs->run()) {
      if (error_str)
         *error_str = ralloc_strdup(mem_ctx, gs->fail_msg);
   } else {
      ret = brw_vec4_generate_assembly(compiler, log_data, mem_ctx, shader,
                                       &prog_data->base, gs->cfg,
                                       final_assembly_size);
   }

   delete gs;
   return ret;
}


} /* namespace brw */