2 * Copyright © 2011 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * @file gen7_sol_state.c
27 * Controls the stream output logic (SOL) stage of the gen7 hardware, which is
28 * used to implement GL_EXT_transform_feedback.
31 #include "brw_context.h"
32 #include "brw_state.h"
33 #include "brw_defines.h"
34 #include "intel_batchbuffer.h"
35 #include "intel_buffer_objects.h"
/**
 * Emits one 3DSTATE_SO_BUFFER packet (4 dwords) per hardware SO buffer slot.
 *
 * Each of the up to 4 output buffers is described by the range bound in the
 * current gl_transform_feedback_object; unbound slots are emitted with a
 * pitch of 0 so the hardware won't write to them.
 */
static void
upload_3dstate_so_buffers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   /* BRW_NEW_VERTEX_PROGRAM */
   const struct gl_shader_program *vs_prog =
      ctx->Shader.CurrentVertexProgram;
   const struct gl_transform_feedback_info *linked_xfb_info =
      &vs_prog->LinkedTransformFeedback;
   /* _NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
   int i;

   /* Set up the up to 4 output buffers.  These are the ranges defined in the
    * gl_transform_feedback_object.
    */
   for (i = 0; i < 4; i++) {
      struct gl_buffer_object *bufferobj = xfb_obj->Buffers[i];
      drm_intel_bo *bo;
      uint32_t start, end;
      uint32_t stride;

      if (!xfb_obj->Buffers[i]) {
	 /* The pitch of 0 in this command indicates that the buffer is
	  * unbound and won't be written to.
	  */
	 BEGIN_BATCH(4);
	 OUT_BATCH(_3DSTATE_SO_BUFFER << 16 | (4 - 2));
	 OUT_BATCH((i << SO_BUFFER_INDEX_SHIFT));
	 OUT_BATCH(0);
	 OUT_BATCH(0);
	 ADVANCE_BATCH();

	 continue;
      }

      bo = intel_buffer_object(bufferobj)->buffer;
      /* GL stride is in dwords; the hardware wants bytes. */
      stride = linked_xfb_info->BufferStride[i] * 4;

      start = xfb_obj->Offset[i];
      assert(start % 4 == 0);
      end = ALIGN(start + xfb_obj->Size[i], 4);
      assert(end <= bo->size);

      /* Offset the starting offset by the current vertex index into the
       * feedback buffer, offset register is always set to 0 at the start of the
       * batchbuffer.
       */
      start += brw->sol.offset_0_batch_start * stride;
      assert(start <= end);

      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_SO_BUFFER << 16 | (4 - 2));
      OUT_BATCH((i << SO_BUFFER_INDEX_SHIFT) | stride);
      OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, start);
      OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, end);
      ADVANCE_BATCH();
   }
}
/**
 * Outputs the 3DSTATE_SO_DECL_LIST command.
 *
 * The data output is a series of 64-bit entries containing a SO_DECL per
 * stream.  We only have one stream of rendering coming out of the GS unit, so
 * we only emit stream 0 (low 16 bits) SO_DECLs.
 */
static void
upload_3dstate_so_decl_list(struct brw_context *brw,
			    struct brw_vue_map *vue_map)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   /* BRW_NEW_VERTEX_PROGRAM */
   const struct gl_shader_program *vs_prog =
      ctx->Shader.CurrentVertexProgram;
   /* _NEW_TRANSFORM_FEEDBACK */
   const struct gl_transform_feedback_info *linked_xfb_info =
      &vs_prog->LinkedTransformFeedback;
   int i;
   uint16_t so_decl[128];
   int buffer_mask = 0;
   /* Running dword offset written so far into each of the 4 SO buffers;
    * used only to assert the linker produced a hole-free layout.
    */
   int next_offset[4] = {0, 0, 0, 0};

   STATIC_ASSERT(ARRAY_SIZE(so_decl) >= MAX_PROGRAM_OUTPUTS);

   /* Construct the list of SO_DECLs to be emitted.  The formatting of the
    * command is feels strange -- each dword pair contains a SO_DECL per stream.
    */
   for (i = 0; i < linked_xfb_info->NumOutputs; i++) {
      int buffer = linked_xfb_info->Outputs[i].OutputBuffer;
      uint16_t decl = 0;
      int vert_result = linked_xfb_info->Outputs[i].OutputRegister;
      unsigned component_mask =
	 (1 << linked_xfb_info->Outputs[i].NumComponents) - 1;

      /* gl_PointSize is stored in VERT_RESULT_PSIZ.w. */
      if (vert_result == VERT_RESULT_PSIZ) {
	 assert(linked_xfb_info->Outputs[i].NumComponents == 1);
	 component_mask <<= 3;
      } else {
	 component_mask <<= linked_xfb_info->Outputs[i].ComponentOffset;
      }

      buffer_mask |= 1 << buffer;

      decl |= buffer << SO_DECL_OUTPUT_BUFFER_SLOT_SHIFT;
      decl |= vue_map->vert_result_to_slot[vert_result] <<
	 SO_DECL_REGISTER_INDEX_SHIFT;
      decl |= component_mask << SO_DECL_COMPONENT_MASK_SHIFT;

      /* This assert should be true until GL_ARB_transform_feedback_instanced
       * is added and we start using the hole flag.
       */
      assert(linked_xfb_info->Outputs[i].DstOffset == next_offset[buffer]);

      next_offset[buffer] += linked_xfb_info->Outputs[i].NumComponents;

      so_decl[i] = decl;
   }

   BEGIN_BATCH(linked_xfb_info->NumOutputs * 2 + 3);
   OUT_BATCH(_3DSTATE_SO_DECL_LIST << 16 |
	     (linked_xfb_info->NumOutputs * 2 + 1));

   OUT_BATCH((buffer_mask << SO_STREAM_TO_BUFFER_SELECTS_0_SHIFT) |
	     (0 << SO_STREAM_TO_BUFFER_SELECTS_1_SHIFT) |
	     (0 << SO_STREAM_TO_BUFFER_SELECTS_2_SHIFT) |
	     (0 << SO_STREAM_TO_BUFFER_SELECTS_3_SHIFT));

   OUT_BATCH((linked_xfb_info->NumOutputs << SO_NUM_ENTRIES_0_SHIFT) |
	     (0 << SO_NUM_ENTRIES_1_SHIFT) |
	     (0 << SO_NUM_ENTRIES_2_SHIFT) |
	     (0 << SO_NUM_ENTRIES_3_SHIFT));

   /* Each entry is a dword pair: stream 0's SO_DECL in the low word, the
    * (unused) stream 1 SO_DECL in the high dword, which we leave zero.
    */
   for (i = 0; i < linked_xfb_info->NumOutputs; i++) {
      OUT_BATCH(so_decl[i]);
      OUT_BATCH(0);
   }
   ADVANCE_BATCH();
}
/**
 * Emits the 3DSTATE_STREAMOUT packet that enables or disables the SOL stage.
 *
 * When \p active is true, turns on the SO function, statistics, per-buffer
 * enables, and the stream-0 vertex read range; when false, only the
 * rasterizer-discard bit (if set) survives in dw1.
 */
static void
upload_3dstate_streamout(struct brw_context *brw, bool active,
			 struct brw_vue_map *vue_map)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   /* _NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
   uint32_t dw1 = 0, dw2 = 0;
   int i;

   /* _NEW_RASTERIZER_DISCARD */
   if (ctx->RasterDiscard)
      dw1 |= SO_RENDERING_DISABLE;

   if (active) {
      /* Read length is in 256-bit (2-slot) units, hence the /2 rounding up. */
      int urb_entry_read_offset = 0;
      int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
	 urb_entry_read_offset;

      dw1 |= SO_FUNCTION_ENABLE;
      dw1 |= SO_STATISTICS_ENABLE;

      /* _NEW_LIGHT */
      if (ctx->Light.ProvokingVertex != GL_FIRST_VERTEX_CONVENTION)
	 dw1 |= SO_REORDER_TRAILING;

      for (i = 0; i < 4; i++) {
	 if (xfb_obj->Buffers[i]) {
	    dw1 |= SO_BUFFER_ENABLE(i);
	 }
      }

      /* We always read the whole vertex.  This could be reduced at some
       * point by reading less and offsetting the register index in the
       * SO_DECLs.
       */
      dw2 |= urb_entry_read_offset << SO_STREAM_0_VERTEX_READ_OFFSET_SHIFT;
      dw2 |= (urb_entry_read_length - 1) <<
	 SO_STREAM_0_VERTEX_READ_LENGTH_SHIFT;
   }

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_STREAMOUT << 16 | (3 - 2));
   OUT_BATCH(dw1);
   OUT_BATCH(dw2);
   ADVANCE_BATCH();
}
/**
 * Top-level SOL state upload, called via the gen7_sol_state atom.
 *
 * Emits the nonpipelined SO buffer/decl-list state only while transform
 * feedback is actively recording, then always emits 3DSTATE_STREAMOUT so the
 * stage is enabled/disabled to match.
 */
static void
upload_sol_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   /* _NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
   /* A paused feedback object must not be written to. */
   bool active = xfb_obj->Active && !xfb_obj->Paused;
   struct brw_vue_map vue_map;

   /* _NEW_TRANSFORM, CACHE_NEW_VS_PROG */
   brw_compute_vue_map(&vue_map, intel, ctx->Transform.ClipPlanesEnabled != 0,
		       brw->vs.prog_data->outputs_written);

   if (active) {
      upload_3dstate_so_buffers(brw);
      upload_3dstate_so_decl_list(brw, &vue_map);

      /* Ask the kernel to zero the SO write-offset registers on this batch. */
      intel->batch.needs_sol_reset = true;
   }

   /* Finally, set up the SOL stage.  This command must always follow updates to
    * the nonpipelined SOL state (3DSTATE_SO_BUFFER, 3DSTATE_SO_DECL_LIST) or
    * MMIO register updates (current performed by the kernel at each batch
    * emit).
    */
   upload_3dstate_streamout(brw, active, &vue_map);
}
/* State atom: re-emits SOL state whenever any of the inputs the upload
 * functions read (flagged in their comments above) change.
 *
 * NOTE(review): the extraction dropped part of the .mesa flag list; _NEW_LIGHT
 * and _NEW_TRANSFORM are restored here because upload_3dstate_streamout reads
 * ctx->Light.ProvokingVertex and upload_sol_state reads
 * ctx->Transform.ClipPlanesEnabled -- confirm against upstream.
 */
const struct brw_tracked_state gen7_sol_state = {
   .dirty = {
      .mesa  = (_NEW_RASTERIZER_DISCARD |
		_NEW_LIGHT |
		_NEW_TRANSFORM_FEEDBACK |
		_NEW_TRANSFORM),
      .brw   = (BRW_NEW_BATCH |
		BRW_NEW_VERTEX_PROGRAM),
      .cache = CACHE_NEW_VS_PROG,
   },
   .emit = upload_sol_state,
};
/**
 * Driver hook for glEndTransformFeedback() on gen7.
 *
 * \param obj the feedback object being ended (unused here; the flush is
 *            global to the batch).
 */
void
gen7_end_transform_feedback(struct gl_context *ctx,
			    struct gl_transform_feedback_object *obj)
{
   /* Because we have to rely on the kernel to reset our SO write offsets, and
    * we only get to do it once per batchbuffer, flush the batch after feedback
    * so another transform feedback can get the write offset reset it needs.
    *
    * This also covers any cache flushing required.
    */
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;

   intel_batchbuffer_flush(intel);
}