/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file gen7_sol_state.c
 *
 * Controls the stream output logic (SOL) stage of the gen7 hardware, which is
 * used to implement GL_EXT_transform_feedback.
 */
31 #include "brw_context.h"
32 #include "brw_state.h"
33 #include "brw_defines.h"
34 #include "intel_batchbuffer.h"
35 #include "intel_buffer_objects.h"
36 #include "main/transformfeedback.h"
39 upload_3dstate_so_buffers(struct brw_context
*brw
)
41 struct intel_context
*intel
= &brw
->intel
;
42 struct gl_context
*ctx
= &intel
->ctx
;
43 /* BRW_NEW_VERTEX_PROGRAM */
44 const struct gl_shader_program
*vs_prog
=
45 ctx
->Shader
.CurrentVertexProgram
;
46 const struct gl_transform_feedback_info
*linked_xfb_info
=
47 &vs_prog
->LinkedTransformFeedback
;
48 /* _NEW_TRANSFORM_FEEDBACK */
49 struct gl_transform_feedback_object
*xfb_obj
=
50 ctx
->TransformFeedback
.CurrentObject
;
53 /* Set up the up to 4 output buffers. These are the ranges defined in the
54 * gl_transform_feedback_object.
56 for (i
= 0; i
< 4; i
++) {
57 struct intel_buffer_object
*bufferobj
=
58 intel_buffer_object(xfb_obj
->Buffers
[i
]);
63 if (!xfb_obj
->Buffers
[i
]) {
64 /* The pitch of 0 in this command indicates that the buffer is
65 * unbound and won't be written to.
68 OUT_BATCH(_3DSTATE_SO_BUFFER
<< 16 | (4 - 2));
69 OUT_BATCH((i
<< SO_BUFFER_INDEX_SHIFT
));
77 bo
= intel_bufferobj_buffer(intel
, bufferobj
, INTEL_WRITE_PART
);
78 stride
= linked_xfb_info
->BufferStride
[i
] * 4;
80 start
= xfb_obj
->Offset
[i
];
81 assert(start
% 4 == 0);
82 end
= ALIGN(start
+ xfb_obj
->Size
[i
], 4);
83 assert(end
<= bo
->size
);
85 /* Offset the starting offset by the current vertex index into the
86 * feedback buffer, offset register is always set to 0 at the start of the
89 start
+= brw
->sol
.offset_0_batch_start
* stride
;
93 OUT_BATCH(_3DSTATE_SO_BUFFER
<< 16 | (4 - 2));
94 OUT_BATCH((i
<< SO_BUFFER_INDEX_SHIFT
) | stride
);
95 OUT_RELOC(bo
, I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
, start
);
96 OUT_RELOC(bo
, I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
, end
);
102 * Outputs the 3DSTATE_SO_DECL_LIST command.
104 * The data output is a series of 64-bit entries containing a SO_DECL per
105 * stream. We only have one stream of rendering coming out of the GS unit, so
106 * we only emit stream 0 (low 16 bits) SO_DECLs.
109 upload_3dstate_so_decl_list(struct brw_context
*brw
,
110 struct brw_vue_map
*vue_map
)
112 struct intel_context
*intel
= &brw
->intel
;
113 struct gl_context
*ctx
= &intel
->ctx
;
114 /* BRW_NEW_VERTEX_PROGRAM */
115 const struct gl_shader_program
*vs_prog
=
116 ctx
->Shader
.CurrentVertexProgram
;
117 /* _NEW_TRANSFORM_FEEDBACK */
118 const struct gl_transform_feedback_info
*linked_xfb_info
=
119 &vs_prog
->LinkedTransformFeedback
;
121 uint16_t so_decl
[128];
123 int next_offset
[4] = {0, 0, 0, 0};
125 STATIC_ASSERT(ARRAY_SIZE(so_decl
) >= MAX_PROGRAM_OUTPUTS
);
127 /* Construct the list of SO_DECLs to be emitted. The formatting of the
128 * command is feels strange -- each dword pair contains a SO_DECL per stream.
130 for (i
= 0; i
< linked_xfb_info
->NumOutputs
; i
++) {
131 int buffer
= linked_xfb_info
->Outputs
[i
].OutputBuffer
;
133 int varying
= linked_xfb_info
->Outputs
[i
].OutputRegister
;
134 unsigned component_mask
=
135 (1 << linked_xfb_info
->Outputs
[i
].NumComponents
) - 1;
137 /* gl_PointSize is stored in VARYING_SLOT_PSIZ.w. */
138 if (varying
== VARYING_SLOT_PSIZ
) {
139 assert(linked_xfb_info
->Outputs
[i
].NumComponents
== 1);
140 component_mask
<<= 3;
142 component_mask
<<= linked_xfb_info
->Outputs
[i
].ComponentOffset
;
145 buffer_mask
|= 1 << buffer
;
147 decl
|= buffer
<< SO_DECL_OUTPUT_BUFFER_SLOT_SHIFT
;
148 decl
|= vue_map
->varying_to_slot
[varying
] <<
149 SO_DECL_REGISTER_INDEX_SHIFT
;
150 decl
|= component_mask
<< SO_DECL_COMPONENT_MASK_SHIFT
;
152 /* This assert should be true until GL_ARB_transform_feedback_instanced
153 * is added and we start using the hole flag.
155 assert(linked_xfb_info
->Outputs
[i
].DstOffset
== next_offset
[buffer
]);
157 next_offset
[buffer
] += linked_xfb_info
->Outputs
[i
].NumComponents
;
162 BEGIN_BATCH(linked_xfb_info
->NumOutputs
* 2 + 3);
163 OUT_BATCH(_3DSTATE_SO_DECL_LIST
<< 16 |
164 (linked_xfb_info
->NumOutputs
* 2 + 1));
166 OUT_BATCH((buffer_mask
<< SO_STREAM_TO_BUFFER_SELECTS_0_SHIFT
) |
167 (0 << SO_STREAM_TO_BUFFER_SELECTS_1_SHIFT
) |
168 (0 << SO_STREAM_TO_BUFFER_SELECTS_2_SHIFT
) |
169 (0 << SO_STREAM_TO_BUFFER_SELECTS_3_SHIFT
));
171 OUT_BATCH((linked_xfb_info
->NumOutputs
<< SO_NUM_ENTRIES_0_SHIFT
) |
172 (0 << SO_NUM_ENTRIES_1_SHIFT
) |
173 (0 << SO_NUM_ENTRIES_2_SHIFT
) |
174 (0 << SO_NUM_ENTRIES_3_SHIFT
));
176 for (i
= 0; i
< linked_xfb_info
->NumOutputs
; i
++) {
177 OUT_BATCH(so_decl
[i
]);
185 upload_3dstate_streamout(struct brw_context
*brw
, bool active
,
186 struct brw_vue_map
*vue_map
)
188 struct intel_context
*intel
= &brw
->intel
;
189 struct gl_context
*ctx
= &intel
->ctx
;
190 /* _NEW_TRANSFORM_FEEDBACK */
191 struct gl_transform_feedback_object
*xfb_obj
=
192 ctx
->TransformFeedback
.CurrentObject
;
193 uint32_t dw1
= 0, dw2
= 0;
196 /* _NEW_RASTERIZER_DISCARD */
197 if (ctx
->RasterDiscard
)
198 dw1
|= SO_RENDERING_DISABLE
;
201 int urb_entry_read_offset
= 0;
202 int urb_entry_read_length
= (vue_map
->num_slots
+ 1) / 2 -
203 urb_entry_read_offset
;
205 dw1
|= SO_FUNCTION_ENABLE
;
206 dw1
|= SO_STATISTICS_ENABLE
;
209 if (ctx
->Light
.ProvokingVertex
!= GL_FIRST_VERTEX_CONVENTION
)
210 dw1
|= SO_REORDER_TRAILING
;
212 for (i
= 0; i
< 4; i
++) {
213 if (xfb_obj
->Buffers
[i
]) {
214 dw1
|= SO_BUFFER_ENABLE(i
);
218 /* We always read the whole vertex. This could be reduced at some
219 * point by reading less and offsetting the register index in the
222 dw2
|= urb_entry_read_offset
<< SO_STREAM_0_VERTEX_READ_OFFSET_SHIFT
;
223 dw2
|= (urb_entry_read_length
- 1) <<
224 SO_STREAM_0_VERTEX_READ_LENGTH_SHIFT
;
228 OUT_BATCH(_3DSTATE_STREAMOUT
<< 16 | (3 - 2));
235 upload_sol_state(struct brw_context
*brw
)
237 struct intel_context
*intel
= &brw
->intel
;
238 struct gl_context
*ctx
= &intel
->ctx
;
239 /* _NEW_TRANSFORM_FEEDBACK */
240 bool active
= _mesa_is_xfb_active_and_unpaused(ctx
);
243 upload_3dstate_so_buffers(brw
);
244 /* CACHE_NEW_VS_PROG */
245 upload_3dstate_so_decl_list(brw
, &brw
->vs
.prog_data
->vue_map
);
247 intel
->batch
.needs_sol_reset
= true;
250 /* Finally, set up the SOL stage. This command must always follow updates to
251 * the nonpipelined SOL state (3DSTATE_SO_BUFFER, 3DSTATE_SO_DECL_LIST) or
252 * MMIO register updates (current performed by the kernel at each batch
255 upload_3dstate_streamout(brw
, active
, &brw
->vs
.prog_data
->vue_map
);
258 const struct brw_tracked_state gen7_sol_state
= {
260 .mesa
= (_NEW_RASTERIZER_DISCARD
|
262 _NEW_TRANSFORM_FEEDBACK
),
263 .brw
= (BRW_NEW_BATCH
|
264 BRW_NEW_VERTEX_PROGRAM
),
265 .cache
= CACHE_NEW_VS_PROG
,
267 .emit
= upload_sol_state
,
271 gen7_end_transform_feedback(struct gl_context
*ctx
,
272 struct gl_transform_feedback_object
*obj
)
274 /* Because we have to rely on the kernel to reset our SO write offsets, and
275 * we only get to do it once per batchbuffer, flush the batch after feedback
276 * so another transform feedback can get the write offset reset it needs.
278 * This also covers any cache flushing required.
280 struct brw_context
*brw
= brw_context(ctx
);
281 struct intel_context
*intel
= &brw
->intel
;
283 intel_batchbuffer_flush(intel
);