i965/drm: Rename drm_bacon_bo to brw_bo.
[mesa.git] src/mesa/drivers/dri/i965/gen7_sol_state.c
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file gen7_sol_state.c
 *
 * Controls the stream output logic (SOL) stage of the gen7 hardware, which is
 * used to implement GL_EXT_transform_feedback.
 */

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "main/transformfeedback.h"

static void
upload_3dstate_so_buffers(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
   const struct gl_transform_feedback_info *linked_xfb_info =
      xfb_obj->program->sh.LinkedTransformFeedback;
   int i;

   /* Set up the (up to four) output buffers.  These are the ranges defined
    * in the gl_transform_feedback_object.
    */
   for (i = 0; i < 4; i++) {
      struct intel_buffer_object *bufferobj =
         intel_buffer_object(xfb_obj->Buffers[i]);
      struct brw_bo *bo;
      uint32_t start, end;
      uint32_t stride;

      if (!xfb_obj->Buffers[i]) {
         /* The pitch of 0 in this command indicates that the buffer is
          * unbound and won't be written to.
          */
         BEGIN_BATCH(4);
         OUT_BATCH(_3DSTATE_SO_BUFFER << 16 | (4 - 2));
         OUT_BATCH((i << SO_BUFFER_INDEX_SHIFT));
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();

         continue;
      }

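      /* linked_xfb_info stores the buffer stride in dwords; 3DSTATE_SO_BUFFER
       * takes a pitch in bytes.
       */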
      stride = linked_xfb_info->Buffers[i].Stride * 4;

      start = xfb_obj->Offset[i];
      assert(start % 4 == 0);
      end = ALIGN(start + xfb_obj->Size[i], 4);
      bo = intel_bufferobj_buffer(brw, bufferobj, start, end - start);
      assert(end <= bo->size);

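      /* DW1 holds the buffer index and pitch; DW2 and DW3 hold the start
       * and end addresses of the bound range.
       */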
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_SO_BUFFER << 16 | (4 - 2));
      OUT_BATCH((i << SO_BUFFER_INDEX_SHIFT) | stride);
      OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, start);
      OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, end);
      ADVANCE_BATCH();
   }
}

/**
 * Outputs the 3DSTATE_SO_DECL_LIST command.
 *
 * The data output is a series of 64-bit entries, each containing one SO_DECL
 * per stream: streams 0 and 1 occupy the low and high halves of the first
 * DWord, and streams 2 and 3 the second.
 */
void
gen7_upload_3dstate_so_decl_list(struct brw_context *brw,
                                 const struct brw_vue_map *vue_map)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
   const struct gl_transform_feedback_info *linked_xfb_info =
      xfb_obj->program->sh.LinkedTransformFeedback;
   uint16_t so_decl[MAX_VERTEX_STREAMS][128];
   int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int next_offset[BRW_MAX_SOL_BUFFERS] = {0, 0, 0, 0};
   int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int max_decls = 0;
   STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);

   memset(so_decl, 0, sizeof(so_decl));

   /* Construct the list of SO_DECLs to be emitted.  The formatting of the
    * command feels strange -- each dword pair contains a SO_DECL per stream.
    */
   for (unsigned i = 0; i < linked_xfb_info->NumOutputs; i++) {
      int buffer = linked_xfb_info->Outputs[i].OutputBuffer;
      uint16_t decl = 0;
      int varying = linked_xfb_info->Outputs[i].OutputRegister;
      const unsigned components = linked_xfb_info->Outputs[i].NumComponents;
      unsigned component_mask = (1 << components) - 1;
      unsigned stream_id = linked_xfb_info->Outputs[i].StreamId;
      unsigned decl_buffer_slot = buffer << SO_DECL_OUTPUT_BUFFER_SLOT_SHIFT;
      assert(stream_id < MAX_VERTEX_STREAMS);

      /* gl_PointSize is stored in VARYING_SLOT_PSIZ.w
       * gl_Layer is stored in VARYING_SLOT_PSIZ.y
       * gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
       */
      if (varying == VARYING_SLOT_PSIZ) {
         assert(components == 1);
         component_mask <<= 3;
      } else if (varying == VARYING_SLOT_LAYER) {
         assert(components == 1);
         component_mask <<= 1;
      } else if (varying == VARYING_SLOT_VIEWPORT) {
         assert(components == 1);
         component_mask <<= 2;
      } else {
         component_mask <<= linked_xfb_info->Outputs[i].ComponentOffset;
      }

      buffer_mask[stream_id] |= 1 << buffer;

      decl |= decl_buffer_slot;
      if (varying == VARYING_SLOT_LAYER || varying == VARYING_SLOT_VIEWPORT) {
         decl |= vue_map->varying_to_slot[VARYING_SLOT_PSIZ] <<
            SO_DECL_REGISTER_INDEX_SHIFT;
      } else {
         assert(vue_map->varying_to_slot[varying] >= 0);
         decl |= vue_map->varying_to_slot[varying] <<
            SO_DECL_REGISTER_INDEX_SHIFT;
      }
      decl |= component_mask << SO_DECL_COMPONENT_MASK_SHIFT;

      /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
       * array.  Instead, it simply increments DstOffset for the following
       * input by the number of components that should be skipped.
       *
       * Our hardware is unusual in that it requires us to program SO_DECLs
       * for fake "hole" components, rather than simply taking the offset
       * for each real varying.  Each hole can have size 1, 2, 3, or 4; we
       * program as many size = 4 holes as we can, then a final hole to
       * accommodate the final 1, 2, or 3 remaining.
       */
      int skip_components =
         linked_xfb_info->Outputs[i].DstOffset - next_offset[buffer];

      next_offset[buffer] += skip_components;

      while (skip_components >= 4) {
         so_decl[stream_id][decls[stream_id]++] =
            SO_DECL_HOLE_FLAG | 0xf | decl_buffer_slot;
         skip_components -= 4;
      }
      if (skip_components > 0)
         so_decl[stream_id][decls[stream_id]++] =
            SO_DECL_HOLE_FLAG | ((1 << skip_components) - 1) |
            decl_buffer_slot;

      assert(linked_xfb_info->Outputs[i].DstOffset == next_offset[buffer]);

      next_offset[buffer] += components;

      so_decl[stream_id][decls[stream_id]++] = decl;

      if (decls[stream_id] > max_decls)
         max_decls = decls[stream_id];
   }

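   /* The packet is three DWords of header (command, stream-to-buffer
    * selects, per-stream entry counts) plus two DWords per SO_DECL entry.
    */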
   BEGIN_BATCH(max_decls * 2 + 3);
   OUT_BATCH(_3DSTATE_SO_DECL_LIST << 16 | (max_decls * 2 + 1));

   OUT_BATCH((buffer_mask[0] << SO_STREAM_TO_BUFFER_SELECTS_0_SHIFT) |
             (buffer_mask[1] << SO_STREAM_TO_BUFFER_SELECTS_1_SHIFT) |
             (buffer_mask[2] << SO_STREAM_TO_BUFFER_SELECTS_2_SHIFT) |
             (buffer_mask[3] << SO_STREAM_TO_BUFFER_SELECTS_3_SHIFT));

   OUT_BATCH((decls[0] << SO_NUM_ENTRIES_0_SHIFT) |
             (decls[1] << SO_NUM_ENTRIES_1_SHIFT) |
             (decls[2] << SO_NUM_ENTRIES_2_SHIFT) |
             (decls[3] << SO_NUM_ENTRIES_3_SHIFT));

   for (int i = 0; i < max_decls; i++) {
      /* Stream 1 | Stream 0 */
      OUT_BATCH(((uint32_t) so_decl[1][i]) << 16 | so_decl[0][i]);
      /* Stream 3 | Stream 2 */
      OUT_BATCH(((uint32_t) so_decl[3][i]) << 16 | so_decl[2][i]);
   }

   ADVANCE_BATCH();
}

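/**
 * Returns true if the given query object exists and is currently active.
 */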
static bool
query_active(struct gl_query_object *q)
{
   return q && q->Active;
}

static void
upload_3dstate_streamout(struct brw_context *brw, bool active,
                         const struct brw_vue_map *vue_map)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
   uint32_t dw1 = 0, dw2 = 0, dw3 = 0, dw4 = 0;
   int i;

   if (active) {
      const struct gl_transform_feedback_info *linked_xfb_info =
         xfb_obj->program->sh.LinkedTransformFeedback;
      int urb_entry_read_offset = 0;
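      /* The vertex read length is counted in 256-bit URB rows (pairs of
       * varying slots), so round the slot count up to an even number.
       */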
      int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
         urb_entry_read_offset;

      dw1 |= SO_FUNCTION_ENABLE;
      dw1 |= SO_STATISTICS_ENABLE;

      /* BRW_NEW_RASTERIZER_DISCARD */
      if (ctx->RasterDiscard) {
         if (!query_active(ctx->Query.PrimitivesGenerated[0])) {
            dw1 |= SO_RENDERING_DISABLE;
         } else {
            perf_debug("Rasterizer discard with a GL_PRIMITIVES_GENERATED "
                       "query active relies on the clipper.");
         }
      }

      /* _NEW_LIGHT */
      if (ctx->Light.ProvokingVertex != GL_FIRST_VERTEX_CONVENTION)
         dw1 |= SO_REORDER_TRAILING;

      if (brw->gen < 8) {
         for (i = 0; i < 4; i++) {
            if (xfb_obj->Buffers[i]) {
               dw1 |= SO_BUFFER_ENABLE(i);
            }
         }
      }

      /* We always read the whole vertex.  This could be reduced at some
       * point by reading less and offsetting the register index in the
       * SO_DECLs.
       */
      dw2 |= SET_FIELD(urb_entry_read_offset, SO_STREAM_0_VERTEX_READ_OFFSET);
      dw2 |= SET_FIELD(urb_entry_read_length - 1, SO_STREAM_0_VERTEX_READ_LENGTH);

      dw2 |= SET_FIELD(urb_entry_read_offset, SO_STREAM_1_VERTEX_READ_OFFSET);
      dw2 |= SET_FIELD(urb_entry_read_length - 1, SO_STREAM_1_VERTEX_READ_LENGTH);

      dw2 |= SET_FIELD(urb_entry_read_offset, SO_STREAM_2_VERTEX_READ_OFFSET);
      dw2 |= SET_FIELD(urb_entry_read_length - 1, SO_STREAM_2_VERTEX_READ_LENGTH);

      dw2 |= SET_FIELD(urb_entry_read_offset, SO_STREAM_3_VERTEX_READ_OFFSET);
      dw2 |= SET_FIELD(urb_entry_read_length - 1, SO_STREAM_3_VERTEX_READ_LENGTH);

      if (brw->gen >= 8) {
         /* Set buffer pitches; 0 means unbound. */
         if (xfb_obj->Buffers[0])
            dw3 |= linked_xfb_info->Buffers[0].Stride * 4;
         if (xfb_obj->Buffers[1])
            dw3 |= (linked_xfb_info->Buffers[1].Stride * 4) << 16;
         if (xfb_obj->Buffers[2])
            dw4 |= linked_xfb_info->Buffers[2].Stride * 4;
         if (xfb_obj->Buffers[3])
            dw4 |= (linked_xfb_info->Buffers[3].Stride * 4) << 16;
      }
   }

   const int dwords = brw->gen >= 8 ? 5 : 3;

   BEGIN_BATCH(dwords);
   OUT_BATCH(_3DSTATE_STREAMOUT << 16 | (dwords - 2));
   OUT_BATCH(dw1);
   OUT_BATCH(dw2);
   if (dwords > 3) {
      OUT_BATCH(dw3);
      OUT_BATCH(dw4);
   }
   ADVANCE_BATCH();
}

static void
upload_sol_state(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   bool active = _mesa_is_xfb_active_and_unpaused(ctx);

   if (active) {
      if (brw->gen >= 8)
         gen8_upload_3dstate_so_buffers(brw);
      else
         upload_3dstate_so_buffers(brw);

      /* BRW_NEW_VUE_MAP_GEOM_OUT */
      gen7_upload_3dstate_so_decl_list(brw, &brw->vue_map_geom_out);
   }

   /* Finally, set up the SOL stage.  This command must always follow updates
    * to the nonpipelined SOL state (3DSTATE_SO_BUFFER, 3DSTATE_SO_DECL_LIST)
    * or MMIO register updates (currently performed by the kernel at each
    * batch emit).
    */
   upload_3dstate_streamout(brw, active, &brw->vue_map_geom_out);
}

const struct brw_tracked_state gen7_sol_state = {
   .dirty = {
      .mesa = _NEW_LIGHT,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_RASTERIZER_DISCARD |
             BRW_NEW_VUE_MAP_GEOM_OUT |
             BRW_NEW_TRANSFORM_FEEDBACK,
   },
   .emit = upload_sol_state,
};

void
gen7_begin_transform_feedback(struct gl_context *ctx, GLenum mode,
                              struct gl_transform_feedback_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_transform_feedback_object *brw_obj =
      (struct brw_transform_feedback_object *) obj;

   assert(brw->gen == 7);

   /* We're about to lose the information needed to compute the number of
    * vertices written during the last Begin/EndTransformFeedback section,
    * so we can't delay it any further.
    */
   brw_compute_xfb_vertices_written(brw, brw_obj);

   /* No primitives have been generated yet. */
   for (int i = 0; i < BRW_MAX_XFB_STREAMS; i++) {
      brw_obj->prims_generated[i] = 0;
   }

   /* Store the starting value of the SO_NUM_PRIMS_WRITTEN counters. */
   brw_save_primitives_written_counters(brw, brw_obj);

   /* Reset the SO buffer offsets to 0. */
   if (!can_do_pipelined_register_writes(brw->screen)) {
      intel_batchbuffer_flush(brw);
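      /* We can't write the registers from the batch here, so ask the kernel
       * to reset the SO write offsets for us when this batch is submitted.
       */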
      brw->batch.needs_sol_reset = true;
   } else {
      for (int i = 0; i < 4; i++) {
         BEGIN_BATCH(3);
         OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
         OUT_BATCH(GEN7_SO_WRITE_OFFSET(i));
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }
   }

   brw_obj->primitive_mode = mode;
}

void
gen7_end_transform_feedback(struct gl_context *ctx,
                            struct gl_transform_feedback_object *obj)
{
   /* After EndTransformFeedback, it's likely that the client program will try
    * to draw using the contents of the transform feedback buffer as vertex
    * input.  In order for this to work, we need to flush the data through at
    * least the GS stage of the pipeline, and flush out the render cache.  For
    * simplicity, just do a full flush.
    */
   struct brw_context *brw = brw_context(ctx);
   struct brw_transform_feedback_object *brw_obj =
      (struct brw_transform_feedback_object *) obj;

   /* Store the ending value of the SO_NUM_PRIMS_WRITTEN counters. */
   if (!obj->Paused)
      brw_save_primitives_written_counters(brw, brw_obj);

   /* EndTransformFeedback() means that we need to update the number of
    * vertices written.  Since that's only needed if DrawTransformFeedback()
    * is called, and computing it means mapping a buffer object, we delay it
    * until absolutely necessary to try to avoid stalls.
    */
   brw_obj->vertices_written_valid = false;
}

void
gen7_pause_transform_feedback(struct gl_context *ctx,
                              struct gl_transform_feedback_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_transform_feedback_object *brw_obj =
      (struct brw_transform_feedback_object *) obj;

   /* Flush any drawing so that the counters have the right values. */
   brw_emit_mi_flush(brw);

   assert(brw->gen == 7);

   /* Save the SOL buffer offset register values. */
   for (int i = 0; i < 4; i++) {
      BEGIN_BATCH(3);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
      OUT_BATCH(GEN7_SO_WRITE_OFFSET(i));
      OUT_RELOC(brw_obj->offset_bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                i * sizeof(uint32_t));
      ADVANCE_BATCH();
   }

   /* Store the temporary ending value of the SO_NUM_PRIMS_WRITTEN counters.
    * While this operation is paused, other transform feedback actions may
    * occur, which will contribute to the counters.  We need to exclude that
    * from our counts.
    */
   brw_save_primitives_written_counters(brw, brw_obj);
}

void
gen7_resume_transform_feedback(struct gl_context *ctx,
                               struct gl_transform_feedback_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_transform_feedback_object *brw_obj =
      (struct brw_transform_feedback_object *) obj;

   assert(brw->gen == 7);

   /* Reload the SOL buffer offset registers. */
   for (int i = 0; i < 4; i++) {
      BEGIN_BATCH(3);
      OUT_BATCH(GEN7_MI_LOAD_REGISTER_MEM | (3 - 2));
      OUT_BATCH(GEN7_SO_WRITE_OFFSET(i));
      OUT_RELOC(brw_obj->offset_bo,
                I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                i * sizeof(uint32_t));
      ADVANCE_BATCH();
   }

   /* Store the new starting value of the SO_NUM_PRIMS_WRITTEN counters. */
   brw_save_primitives_written_counters(brw, brw_obj);
}