/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "main/glheader.h"
#include "main/context.h"
#include "main/state.h"
#include "main/enums.h"
#include "tnl/tnl.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS
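
/* DBG() output in this file is gated on the DEBUG_PRIMS flag, typically
 * enabled at runtime with INTEL_DEBUG=prims in the environment.
 */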
/* Both tables are indexed by GL primitive mode (GL_POINTS == 0 through
 * GL_POLYGON == 9).
 */
static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
   _3DPRIM_POINTLIST,
   _3DPRIM_LINELIST,
   _3DPRIM_LINELOOP,
   _3DPRIM_LINESTRIP,
   _3DPRIM_TRILIST,
   _3DPRIM_TRISTRIP,
   _3DPRIM_TRIFAN,
   _3DPRIM_QUADLIST,
   _3DPRIM_QUADSTRIP,
   _3DPRIM_POLYGON
};

static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,
   GL_LINES,
   GL_LINES,
   GL_LINES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES
};

/* When the primitive changes, set a state bit and re-validate.  Not
 * the nicest and would rather deal with this by having all the
 * programs be immune to the active primitive (ie. cope with all
 * possibilities).  That may not be realistic however.
 */
static GLuint brw_set_prim(struct brw_context *brw,
                           const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->intel.ctx;
   GLenum mode = prim->mode;

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      mode = GL_TRIANGLE_STRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      mode = GL_TRIANGLE_FAN;
   }

   if (mode != brw->primitive) {
      brw->primitive = mode;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[mode] != brw->intel.reduced_primitive) {
         brw->intel.reduced_primitive = reduced_prim[mode];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }

   return prim_to_hw_prim[mode];
}
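
/* The hardware cannot be handed a dangling partial quad: trim() below drops
 * an incomplete trailing quad (GL_QUADS) or a trailing odd vertex
 * (GL_QUAD_STRIP) from the vertex count; all other modes pass through
 * unchanged.
 */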
static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}
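
/* For example: trim(GL_QUADS, 7) == 4, trim(GL_QUAD_STRIP, 7) == 6, and
 * trim(GL_QUAD_STRIP, 3) == 0 (too short to form a single quad).
 */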

static void brw_emit_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim,
                          uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(6);
   OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
             hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
             vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(1); // instance count
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}
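
/* For reference, the 3DPRIMITIVE packet emitted above is six dwords on this
 * generation: header (opcode, dword length, topology and access type),
 * vertex count per instance, start vertex, instance count, start instance,
 * and base vertex.
 */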

static void brw_merge_inputs( struct brw_context *brw,
                              const struct gl_client_array *arrays[])
{
   struct brw_vertex_info old = brw->vb.info;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   memset(&brw->vb.info, 0, sizeof(brw->vb.info));

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;

      if (arrays[i]->StrideB != 0)
         brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
            ((i%16) * 2);
   }

   /* Raise statechanges if input sizes have changed. */
   if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
      brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;
}
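
/* vb.info.sizes[] packs two bits per attribute (component count minus one),
 * sixteen attributes per element: e.g. a three-component array bound to
 * attribute 5 stores the value 2 in bits 11:10 of sizes[0].
 */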

/* XXX: could split the primitive list to fallback only on the
 * non-conformant primitives.
 */
static GLboolean check_fallbacks( struct brw_context *brw,
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims )
{
   struct gl_context *ctx = &brw->intel.ctx;
   GLuint i;

   /* If we don't require strict OpenGL conformance, never
    * use fallbacks.  If we're forcing fallbacks, always
    * use fallbacks.
    */
   if (brw->intel.conformance_mode == 0)
      return GL_FALSE;

   if (brw->intel.conformance_mode == 2)
      return GL_TRUE;

   if (ctx->Polygon.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (reduced_prim[prim[i].mode] == GL_TRIANGLES)
            return GL_TRUE;
   }

   /* BRW hardware will do AA lines, but they seem to be non-conformant.
    * TBD whether we keep this fallback:
    */
   if (ctx->Line.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (reduced_prim[prim[i].mode] == GL_LINES)
            return GL_TRUE;
   }

   /* Stipple -- these fallbacks could be resolved with a little
    * bit of work?
    */
   if (ctx->Line.StippleFlag) {
      for (i = 0; i < nr_prims; i++) {
         /* GS doesn't get enough information to know when to reset
          * the stipple counter?!?
          */
         if (prim[i].mode == GL_LINE_LOOP || prim[i].mode == GL_LINE_STRIP)
            return GL_TRUE;

         if (prim[i].mode == GL_POLYGON &&
             (ctx->Polygon.FrontMode == GL_LINE ||
              ctx->Polygon.BackMode == GL_LINE))
            return GL_TRUE;
      }
   }

   if (ctx->Point.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (prim[i].mode == GL_POINTS)
            return GL_TRUE;
   }

   /* BRW hardware doesn't handle GL_CLAMP texturing correctly;
    * brw_wm_sampler_state:translate_wrap_mode() treats GL_CLAMP
    * as GL_CLAMP_TO_EDGE instead.  If we're using GL_CLAMP, and
    * we want strict conformance, force the fallback.
    * Right now, we only do this for 1D, 2D and 3D textures.
    */
   {
      GLuint u;
      for (u = 0; u < ctx->Const.MaxTextureCoordUnits; u++) {
         struct gl_texture_unit *texUnit = &ctx->Texture.Unit[u];
         if (texUnit->Enabled) {
            if (texUnit->Enabled & TEXTURE_1D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_1D_INDEX]->Sampler.WrapS == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
            if (texUnit->Enabled & TEXTURE_2D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_2D_INDEX]->Sampler.WrapS == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_2D_INDEX]->Sampler.WrapT == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
            if (texUnit->Enabled & TEXTURE_3D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_3D_INDEX]->Sampler.WrapS == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_3D_INDEX]->Sampler.WrapT == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_3D_INDEX]->Sampler.WrapR == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
         }
      }
   }

   /* Nothing stopping us from the fast path now */
   return GL_FALSE;
}
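
/* conformance_mode effectively acts as a tri-state here (the knob that sets
 * it is not shown in this file): 0 never falls back, 2 always falls back,
 * and any other value falls back only for the non-conformant cases checked
 * above.
 */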

/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static GLboolean brw_try_draw_prims( struct gl_context *ctx,
                                     const struct gl_client_array *arrays[],
                                     const struct _mesa_prim *prim,
                                     GLuint nr_prims,
                                     const struct _mesa_index_buffer *ib,
                                     GLuint min_index,
                                     GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   GLboolean retval = GL_FALSE;
   GLboolean warn = GL_FALSE;
   GLuint i;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   if (check_fallbacks(brw, prim, nr_prims))
      return GL_FALSE;

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   intel_prepare_render(intel);

   for (i = 0; i < nr_prims; i++) {
      uint32_t hw_prim;

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.  This fraction is just a guess (minimal full state plus
       * a primitive is around 512 bytes), and would be better if we had
       * an upper bound of how much we might emit in a single
       * brw_try_draw_prims().
       */
      intel_batchbuffer_require_space(intel, 1024, false);

      hw_prim = brw_set_prim(brw, &prim[i]);
      if (brw->state.dirty.brw) {
         brw_validate_state(brw);

         /* Various fallback checks:  */
         if (brw->intel.Fallback)
            goto out;

         /* Check that we can fit our state in with our existing batchbuffer, or
          * flush otherwise.
          */
         if (dri_bufmgr_check_aperture_space(brw->state.validated_bos,
                                             brw->state.validated_bo_count)) {
            static GLboolean warned;
            intel_batchbuffer_flush(intel);

            /* Validate the state after we flushed the batch (which would have
             * changed the set of dirty state).  If we still fail to
             * check_aperture, warn of what's happening, but attempt to continue
             * on since it may succeed anyway, and the user would probably rather
             * see a failure and a warning than a fallback.
             */
            brw_validate_state(brw);
            if (!warned &&
                dri_bufmgr_check_aperture_space(brw->state.validated_bos,
                                                brw->state.validated_bo_count)) {
               warn = GL_TRUE;
               warned = GL_TRUE;
            }
         }

         intel->no_batch_wrap = GL_TRUE;
         brw_upload_state(brw);
      }

      brw_emit_prim(brw, &prim[i], hw_prim);

      intel->no_batch_wrap = GL_FALSE;

      retval = GL_TRUE;
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel);
 out:

   brw_state_cache_check_size(brw);

   if (warn)
      fprintf(stderr, "i965: Single primitive emit potentially exceeded "
              "available aperture space\n");

   if (!retval)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}
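
/* no_batch_wrap, set around the state upload and primitive emit above,
 * asserts that everything emitted while it is set fits in the current batch:
 * a wrap at that point would separate the validated state from the primitive
 * that depends on it.
 */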

void brw_draw_prims( struct gl_context *ctx,
                     const struct gl_client_array *arrays[],
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index )
{
   GLboolean retval;

   if (!vbo_all_varyings_in_vbos(arrays)) {
      if (!index_bounds_valid)
         vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);

      /* Decide if we want to rebase.  If so we end up recursing once
       * only into this function.
       */
      if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
         vbo_rebase_prims(ctx, arrays,
                          prim, nr_prims,
                          ib, min_index, max_index,
                          brw_draw_prims );
         return;
      }
   }

   /* Make a first attempt at drawing:
    */
   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

   /* Otherwise, we really are out of memory.  Pass the drawing
    * command to the software tnl module, which will in turn call
    * swrast to do the drawing.
    */
   if (!retval) {
      _swsetup_Wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
   }
}
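
/* A note on the rebase path above: vbo_rebase_prims offsets the arrays and
 * index values so that drawing starts at zero, then re-enters brw_draw_prims
 * once with the rebased data, which saves uploading the unused vertex data
 * below min_index.
 */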

void brw_draw_init( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   int i;

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}

void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}