/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file gen7_sol_state.c
 *
 * Controls the stream output logic (SOL) stage of the gen7 hardware, which is
 * used to implement GL_EXT_transform_feedback.
 */
31 #include "brw_context.h"
32 #include "brw_state.h"
33 #include "brw_defines.h"
34 #include "intel_batchbuffer.h"
35 #include "intel_buffer_objects.h"
36 #include "main/transformfeedback.h"
39 upload_3dstate_so_buffers(struct brw_context
*brw
)
41 struct intel_context
*intel
= &brw
->intel
;
42 struct gl_context
*ctx
= &intel
->ctx
;
43 /* BRW_NEW_VERTEX_PROGRAM */
44 const struct gl_shader_program
*vs_prog
=
45 ctx
->Shader
.CurrentVertexProgram
;
46 const struct gl_transform_feedback_info
*linked_xfb_info
=
47 &vs_prog
->LinkedTransformFeedback
;
48 /* BRW_NEW_TRANSFORM_FEEDBACK */
49 struct gl_transform_feedback_object
*xfb_obj
=
50 ctx
->TransformFeedback
.CurrentObject
;
53 /* Set up the up to 4 output buffers. These are the ranges defined in the
54 * gl_transform_feedback_object.
56 for (i
= 0; i
< 4; i
++) {
57 struct intel_buffer_object
*bufferobj
=
58 intel_buffer_object(xfb_obj
->Buffers
[i
]);
63 if (!xfb_obj
->Buffers
[i
]) {
64 /* The pitch of 0 in this command indicates that the buffer is
65 * unbound and won't be written to.
68 OUT_BATCH(_3DSTATE_SO_BUFFER
<< 16 | (4 - 2));
69 OUT_BATCH((i
<< SO_BUFFER_INDEX_SHIFT
));
77 bo
= intel_bufferobj_buffer(intel
, bufferobj
, INTEL_WRITE_PART
);
78 stride
= linked_xfb_info
->BufferStride
[i
] * 4;
80 start
= xfb_obj
->Offset
[i
];
81 assert(start
% 4 == 0);
82 end
= ALIGN(start
+ xfb_obj
->Size
[i
], 4);
83 assert(end
<= bo
->size
);
86 OUT_BATCH(_3DSTATE_SO_BUFFER
<< 16 | (4 - 2));
87 OUT_BATCH((i
<< SO_BUFFER_INDEX_SHIFT
) | stride
);
88 OUT_RELOC(bo
, I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
, start
);
89 OUT_RELOC(bo
, I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
, end
);
95 * Outputs the 3DSTATE_SO_DECL_LIST command.
97 * The data output is a series of 64-bit entries containing a SO_DECL per
98 * stream. We only have one stream of rendering coming out of the GS unit, so
99 * we only emit stream 0 (low 16 bits) SO_DECLs.
102 upload_3dstate_so_decl_list(struct brw_context
*brw
,
103 const struct brw_vue_map
*vue_map
)
105 struct intel_context
*intel
= &brw
->intel
;
106 struct gl_context
*ctx
= &intel
->ctx
;
107 /* BRW_NEW_VERTEX_PROGRAM */
108 const struct gl_shader_program
*vs_prog
=
109 ctx
->Shader
.CurrentVertexProgram
;
110 /* BRW_NEW_TRANSFORM_FEEDBACK */
111 const struct gl_transform_feedback_info
*linked_xfb_info
=
112 &vs_prog
->LinkedTransformFeedback
;
114 uint16_t so_decl
[128];
116 int next_offset
[4] = {0, 0, 0, 0};
118 STATIC_ASSERT(ARRAY_SIZE(so_decl
) >= MAX_PROGRAM_OUTPUTS
);
120 /* Construct the list of SO_DECLs to be emitted. The formatting of the
121 * command is feels strange -- each dword pair contains a SO_DECL per stream.
123 for (i
= 0; i
< linked_xfb_info
->NumOutputs
; i
++) {
124 int buffer
= linked_xfb_info
->Outputs
[i
].OutputBuffer
;
126 int varying
= linked_xfb_info
->Outputs
[i
].OutputRegister
;
127 unsigned component_mask
=
128 (1 << linked_xfb_info
->Outputs
[i
].NumComponents
) - 1;
130 /* gl_PointSize is stored in VARYING_SLOT_PSIZ.w. */
131 if (varying
== VARYING_SLOT_PSIZ
) {
132 assert(linked_xfb_info
->Outputs
[i
].NumComponents
== 1);
133 component_mask
<<= 3;
135 component_mask
<<= linked_xfb_info
->Outputs
[i
].ComponentOffset
;
138 buffer_mask
|= 1 << buffer
;
140 decl
|= buffer
<< SO_DECL_OUTPUT_BUFFER_SLOT_SHIFT
;
141 decl
|= vue_map
->varying_to_slot
[varying
] <<
142 SO_DECL_REGISTER_INDEX_SHIFT
;
143 decl
|= component_mask
<< SO_DECL_COMPONENT_MASK_SHIFT
;
145 /* This assert should be true until GL_ARB_transform_feedback_instanced
146 * is added and we start using the hole flag.
148 assert(linked_xfb_info
->Outputs
[i
].DstOffset
== next_offset
[buffer
]);
150 next_offset
[buffer
] += linked_xfb_info
->Outputs
[i
].NumComponents
;
155 BEGIN_BATCH(linked_xfb_info
->NumOutputs
* 2 + 3);
156 OUT_BATCH(_3DSTATE_SO_DECL_LIST
<< 16 |
157 (linked_xfb_info
->NumOutputs
* 2 + 1));
159 OUT_BATCH((buffer_mask
<< SO_STREAM_TO_BUFFER_SELECTS_0_SHIFT
) |
160 (0 << SO_STREAM_TO_BUFFER_SELECTS_1_SHIFT
) |
161 (0 << SO_STREAM_TO_BUFFER_SELECTS_2_SHIFT
) |
162 (0 << SO_STREAM_TO_BUFFER_SELECTS_3_SHIFT
));
164 OUT_BATCH((linked_xfb_info
->NumOutputs
<< SO_NUM_ENTRIES_0_SHIFT
) |
165 (0 << SO_NUM_ENTRIES_1_SHIFT
) |
166 (0 << SO_NUM_ENTRIES_2_SHIFT
) |
167 (0 << SO_NUM_ENTRIES_3_SHIFT
));
169 for (i
= 0; i
< linked_xfb_info
->NumOutputs
; i
++) {
170 OUT_BATCH(so_decl
[i
]);
178 upload_3dstate_streamout(struct brw_context
*brw
, bool active
,
179 const struct brw_vue_map
*vue_map
)
181 struct intel_context
*intel
= &brw
->intel
;
182 struct gl_context
*ctx
= &intel
->ctx
;
183 /* BRW_NEW_TRANSFORM_FEEDBACK */
184 struct gl_transform_feedback_object
*xfb_obj
=
185 ctx
->TransformFeedback
.CurrentObject
;
186 uint32_t dw1
= 0, dw2
= 0;
190 int urb_entry_read_offset
= 0;
191 int urb_entry_read_length
= (vue_map
->num_slots
+ 1) / 2 -
192 urb_entry_read_offset
;
194 dw1
|= SO_FUNCTION_ENABLE
;
195 dw1
|= SO_STATISTICS_ENABLE
;
198 if (ctx
->Light
.ProvokingVertex
!= GL_FIRST_VERTEX_CONVENTION
)
199 dw1
|= SO_REORDER_TRAILING
;
201 for (i
= 0; i
< 4; i
++) {
202 if (xfb_obj
->Buffers
[i
]) {
203 dw1
|= SO_BUFFER_ENABLE(i
);
207 /* We always read the whole vertex. This could be reduced at some
208 * point by reading less and offsetting the register index in the
211 dw2
|= urb_entry_read_offset
<< SO_STREAM_0_VERTEX_READ_OFFSET_SHIFT
;
212 dw2
|= (urb_entry_read_length
- 1) <<
213 SO_STREAM_0_VERTEX_READ_LENGTH_SHIFT
;
217 OUT_BATCH(_3DSTATE_STREAMOUT
<< 16 | (3 - 2));
224 upload_sol_state(struct brw_context
*brw
)
226 struct intel_context
*intel
= &brw
->intel
;
227 struct gl_context
*ctx
= &intel
->ctx
;
228 /* BRW_NEW_TRANSFORM_FEEDBACK */
229 bool active
= _mesa_is_xfb_active_and_unpaused(ctx
);
232 upload_3dstate_so_buffers(brw
);
233 /* BRW_NEW_VUE_MAP_GEOM_OUT */
234 upload_3dstate_so_decl_list(brw
, &brw
->vue_map_geom_out
);
237 /* Finally, set up the SOL stage. This command must always follow updates to
238 * the nonpipelined SOL state (3DSTATE_SO_BUFFER, 3DSTATE_SO_DECL_LIST) or
239 * MMIO register updates (current performed by the kernel at each batch
242 upload_3dstate_streamout(brw
, active
, &brw
->vue_map_geom_out
);
245 const struct brw_tracked_state gen7_sol_state
= {
247 .mesa
= (_NEW_LIGHT
),
248 .brw
= (BRW_NEW_BATCH
|
249 BRW_NEW_VERTEX_PROGRAM
|
250 BRW_NEW_VUE_MAP_GEOM_OUT
|
251 BRW_NEW_TRANSFORM_FEEDBACK
)
253 .emit
= upload_sol_state
,
257 gen7_begin_transform_feedback(struct gl_context
*ctx
, GLenum mode
,
258 struct gl_transform_feedback_object
*obj
)
260 struct brw_context
*brw
= brw_context(ctx
);
261 struct intel_context
*intel
= &brw
->intel
;
263 /* Reset the SOL buffer offset register. */
264 for (int i
= 0; i
< 4; i
++) {
266 OUT_BATCH(MI_LOAD_REGISTER_IMM
| (3 - 2));
267 OUT_BATCH(GEN7_SO_WRITE_OFFSET(i
));
274 gen7_end_transform_feedback(struct gl_context
*ctx
,
275 struct gl_transform_feedback_object
*obj
)
277 /* Because we have to rely on the kernel to reset our SO write offsets, and
278 * we only get to do it once per batchbuffer, flush the batch after feedback
279 * so another transform feedback can get the write offset reset it needs.
281 * This also covers any cache flushing required.
283 struct brw_context
*brw
= brw_context(ctx
);
284 struct intel_context
*intel
= &brw
->intel
;
286 intel_batchbuffer_flush(intel
);