/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "main/glheader.h"
#include "main/context.h"
#include "main/state.h"
#include "main/api_validate.h"
#include "main/enums.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_fallback.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"

#include "tnl/tnl.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"

#define FILE_DEBUG_FLAG DEBUG_BATCH
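
/* Both tables below are indexed by the GL primitive mode, which runs
 * contiguously from GL_POINTS (0) through GL_POLYGON (9); hence the
 * GL_POLYGON+1 sizing.  prim_to_hw_prim maps each mode to the hardware
 * 3DPRIM topology code, and reduced_prim collapses each mode to the
 * point/line/triangle class that primitive-dependent state is keyed on.
 */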
static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
   _3DPRIM_POINTLIST,
   _3DPRIM_LINELIST,
   _3DPRIM_LINELOOP,
   _3DPRIM_LINESTRIP,
   _3DPRIM_TRILIST,
   _3DPRIM_TRISTRIP,
   _3DPRIM_TRIFAN,
   _3DPRIM_QUADLIST,
   _3DPRIM_QUADSTRIP,
   _3DPRIM_POLYGON
};

static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,
   GL_LINES,
   GL_LINES,
   GL_LINES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES
};

/* When the primitive changes, set a state bit and re-validate.  Not
 * the nicest solution; we would rather deal with this by having all the
 * programs be immune to the active primitive (ie. cope with all
 * possibilities).  That may not be realistic however.
 */
static GLuint brw_set_prim(struct brw_context *brw, GLenum prim)
{
   GLcontext *ctx = &brw->intel.ctx;

   if (INTEL_DEBUG & DEBUG_PRIMS)
      _mesa_printf("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim));

   /* Slight optimization to avoid the GS program when not needed: a
    * smooth-shaded, filled quad strip renders identically as a
    * triangle strip.
    */
   if (prim == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      prim = GL_TRIANGLE_STRIP;

   if (prim != brw->primitive) {
      brw->primitive = prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim] != brw->intel.reduced_primitive) {
         brw->intel.reduced_primitive = reduced_prim[prim];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }

   return prim_to_hw_prim[prim];
}
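
/* The hardware cannot draw a partial quad, so trim() drops trailing
 * vertices that cannot form a complete one: quad lists are cut to a
 * multiple of 4 and quad strips to an even count of at least 4.  For
 * example, a 9-vertex GL_QUAD_STRIP is trimmed to 8 vertices (3 quads)
 * and a 7-vertex GL_QUADS draw to 4 vertices (1 quad).
 */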
static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}

static void brw_emit_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim,
                          uint32_t hw_prim)
{
   struct brw_3d_primitive prim_packet;
   struct intel_context *intel = &brw->intel;

   if (INTEL_DEBUG & DEBUG_PRIMS)
      _mesa_printf("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
                   prim->start, prim->count);
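
   /* Build the 3DPRIMITIVE command.  Following the usual GEN command
    * header convention, the length field holds the packet size in DWords
    * minus two; that is what sizeof(prim_packet)/4 - 2 computes below.
    */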
   prim_packet.header.opcode = CMD_3D_PRIM;
   prim_packet.header.length = sizeof(prim_packet)/4 - 2;
   prim_packet.header.pad = 0;
   prim_packet.header.topology = hw_prim;
   prim_packet.header.indexed = prim->indexed;

   prim_packet.verts_per_instance = trim(prim->mode, prim->count);
   prim_packet.start_vert_location = prim->start;
   prim_packet.instance_count = 1;
   prim_packet.start_instance_location = 0;
   prim_packet.base_vert_location = 0;

   /* Can't wrap here, since we rely on the validated state. */
   brw->no_batch_wrap = GL_TRUE;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the chip besides the draw code.
    */
   if (intel->always_flush_cache) {
      BEGIN_BATCH(1, IGNORE_CLIPRECTS);
      OUT_BATCH(intel->vtbl.flush_cmd());
      ADVANCE_BATCH();
   }
   if (prim_packet.verts_per_instance) {
      intel_batchbuffer_data(brw->intel.batch, &prim_packet,
                             sizeof(prim_packet), LOOP_CLIPRECTS);
   }
   if (intel->always_flush_cache) {
      BEGIN_BATCH(1, IGNORE_CLIPRECTS);
      OUT_BATCH(intel->vtbl.flush_cmd());
      ADVANCE_BATCH();
   }

   brw->no_batch_wrap = GL_FALSE;
}

static void brw_merge_inputs( struct brw_context *brw,
                              const struct gl_client_array *arrays[])
{
   struct brw_vertex_info old = brw->vb.info;
   GLuint i;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      dri_bo_unreference(brw->vb.inputs[i].bo);

   memset(&brw->vb.inputs, 0, sizeof(brw->vb.inputs));
   memset(&brw->vb.info, 0, sizeof(brw->vb.info));

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;

      if (arrays[i]->StrideB != 0)
         brw->vb.info.varying |= 1 << i;
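
      /* Each attribute's component count is packed into 2 bits (Size is
       * 1..4, stored as Size-1), 16 attributes per element of sizes[].
       */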
      brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
         ((i % 16) * 2);
   }

   /* Raise statechanges if input sizes and varying have changed:
    */
   if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
      brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;

   if (brw->vb.info.varying != old.varying)
      brw->state.dirty.brw |= BRW_NEW_INPUT_VARYING;
}

/* XXX: could split the primitive list to fallback only on the
 * non-conformant primitives.
 */
static GLboolean check_fallbacks( struct brw_context *brw,
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims )
{
   GLcontext *ctx = &brw->intel.ctx;
   GLuint i;

   /* If we don't require strict OpenGL conformance, never
    * use fallbacks.  If we're forcing fallbacks, always
    * use fallbacks.
    */
   if (brw->intel.conformance_mode == 0)
      return GL_FALSE;

   if (brw->intel.conformance_mode == 2)
      return GL_TRUE;
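
   /* From here on conformance_mode is 1: fall back only for the specific
    * cases below where the hardware is known to be non-conformant.
    */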

   if (ctx->Polygon.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (reduced_prim[prim[i].mode] == GL_TRIANGLES)
            return GL_TRUE;
   }

   /* BRW hardware will do AA lines, but they are non-conformant it
    * seems.  TBD whether we keep this fallback:
    */
   if (ctx->Line.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (reduced_prim[prim[i].mode] == GL_LINES)
            return GL_TRUE;
   }

   /* Stipple -- these fallbacks could be resolved with a little
    * bit of work?
    */
   if (ctx->Line.StippleFlag) {
      for (i = 0; i < nr_prims; i++) {
         /* GS doesn't get enough information to know when to reset
          * the stipple counter?!?
          */
         if (prim[i].mode == GL_LINE_LOOP || prim[i].mode == GL_LINE_STRIP)
            return GL_TRUE;

         if (prim[i].mode == GL_POLYGON &&
             (ctx->Polygon.FrontMode == GL_LINE ||
              ctx->Polygon.BackMode == GL_LINE))
            return GL_TRUE;
      }
   }

   if (ctx->Point.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (prim[i].mode == GL_POINTS)
            return GL_TRUE;
   }

   /* BRW hardware doesn't handle GL_CLAMP texturing correctly;
    * brw_wm_sampler_state:translate_wrap_mode() treats GL_CLAMP
    * as GL_CLAMP_TO_EDGE instead.  If we're using GL_CLAMP and
    * we want strict conformance, force the fallback.  This covers
    * 1D, 2D and 3D textures.
    */
   {
      GLuint u;
      for (u = 0; u < ctx->Const.MaxTextureCoordUnits; u++) {
         struct gl_texture_unit *texUnit = &ctx->Texture.Unit[u];
         if (texUnit->Enabled) {
            if (texUnit->Enabled & TEXTURE_1D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_1D_INDEX]->WrapS == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
            if (texUnit->Enabled & TEXTURE_2D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_2D_INDEX]->WrapS == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_2D_INDEX]->WrapT == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
            if (texUnit->Enabled & TEXTURE_3D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_3D_INDEX]->WrapS == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_3D_INDEX]->WrapT == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_3D_INDEX]->WrapR == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
         }
      }
   }

   /* Nothing stopping us from the fast path now */
   return GL_FALSE;
}

/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static GLboolean brw_try_draw_prims( GLcontext *ctx,
                                     const struct gl_client_array *arrays[],
                                     const struct _mesa_prim *prim,
                                     GLuint nr_prims,
                                     const struct _mesa_index_buffer *ib,
                                     GLuint min_index,
                                     GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   GLboolean retval = GL_FALSE;
   GLboolean warn = GL_FALSE;
   GLboolean first_time = GL_TRUE;
   GLuint i;

   _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   if (check_fallbacks(brw, prim, nr_prims))
      return GL_FALSE;

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   LOCK_HARDWARE(intel);
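
   /* If the window isn't currently visible (no cliprects) and we aren't
    * rendering to a destination with constant cliprects (e.g. an FBO),
    * there is nothing to draw; succeed without emitting anything.
    */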
   if (!intel->constant_cliprect && intel->driDrawable->numClipRects == 0) {
      UNLOCK_HARDWARE(intel);
      return GL_TRUE;
   }

   for (i = 0; i < nr_prims; i++) {
      uint32_t hw_prim;

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.  This fraction is just a guess (minimal full state plus
       * a primitive is around 512 bytes), and would be better if we had
       * an upper bound of how much we might emit in a single
       * brw_try_draw_prims().
       */
      intel_batchbuffer_require_space(intel->batch, intel->batch->size / 4,
                                      LOOP_CLIPRECTS);

      hw_prim = brw_set_prim(brw, prim[i].mode);
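
      /* brw_set_prim() may have flagged BRW_NEW_PRIMITIVE above; if so
       * (or on the first iteration) the pipeline state must be
       * re-validated and re-uploaded before emitting the primitive.
       */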
      if (first_time || (brw->state.dirty.brw & BRW_NEW_PRIMITIVE)) {
         first_time = GL_FALSE;

         brw_validate_state(brw);

         /* Various fallback checks: */
         if (brw->intel.Fallback)
            goto out;

         /* Check that we can fit our state in with our existing batchbuffer,
          * or flush otherwise.
          */
         if (dri_bufmgr_check_aperture_space(brw->state.validated_bos,
                                             brw->state.validated_bo_count)) {
            static GLboolean warned;
            intel_batchbuffer_flush(intel->batch);

            /* Validate the state after we flushed the batch (which would have
             * changed the set of dirty state).  If we still fail to
             * check_aperture, warn of what's happening, but attempt to continue
             * on since it may succeed anyway, and the user would probably rather
             * see a failure and a warning than a fallback.
             */
            brw_validate_state(brw);
            if (!warned &&
                dri_bufmgr_check_aperture_space(brw->state.validated_bos,
                                                brw->state.validated_bo_count)) {
               warn = GL_TRUE;
               warned = GL_TRUE;
            }
         }

         brw_upload_state(brw);
      }

      brw_emit_prim(brw, &prim[i], hw_prim);

      retval = GL_TRUE;
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel->batch);
 out:
   UNLOCK_HARDWARE(intel);

   if (warn)
      fprintf(stderr, "i965: Single primitive emit potentially exceeded "
              "available aperture space\n");

   if (!retval)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}
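
/* A draw whose min_index is nonzero would otherwise make the vertex
 * upload/fetch code process everything below min_index as well.
 * Rebasing (via vbo_rebase_prims) shifts the arrays and indices so that
 * min_index becomes 0 before we draw.
 */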
static GLboolean brw_need_rebase( GLcontext *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index )
{
   if (min_index == 0)
      return GL_FALSE;

   if (ib) {
      if (!vbo_all_varyings_in_vbos(arrays))
         return GL_FALSE;
      else
         return GL_TRUE;
   }
   else {
      /* Hmm.  This isn't quite what I wanted.  BRW can actually
       * handle the mixed case well enough that we shouldn't need to
       * rebase.  However, it's probably not very common, nor hugely
       * expensive to do it this way:
       */
      if (!vbo_all_varyings_in_vbos(arrays))
         return GL_TRUE;

      return GL_FALSE;
   }
}

void brw_draw_prims( GLcontext *ctx,
                     const struct gl_client_array *arrays[],
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLuint min_index,
                     GLuint max_index )
{
   GLboolean retval;

   /* Decide if we want to rebase.  If so we end up recursing once
    * only into this function.
    */
   if (brw_need_rebase( ctx, arrays, ib, min_index )) {
      vbo_rebase_prims( ctx, arrays,
                        prim, nr_prims,
                        ib, min_index, max_index,
                        brw_draw_prims );
      return;
   }

   /* Make a first attempt at drawing:
    */
   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib,
                               min_index, max_index);

   /* If that failed, we really are out of memory.  Pass the drawing
    * command to the software tnl module, which will in turn call
    * swrast to do the drawing.
    */
   if (!retval) {
      _swsetup_Wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
   }
}

void brw_draw_init( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct vbo_context *vbo = vbo_context(ctx);

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;
}

void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   if (brw->vb.upload.bo != NULL) {
      dri_bo_unreference(brw->vb.upload.bo);
      brw->vb.upload.bo = NULL;
   }

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      dri_bo_unreference(brw->vb.inputs[i].bo);
      brw->vb.inputs[i].bo = NULL;
   }

   dri_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}