/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/glheader.h"
#include "main/context.h"
#include "main/state.h"
#include "main/enums.h"
#include "tnl/tnl.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS

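/* Map each GL primitive mode to the 3DPRIMITIVE hardware topology type,
 * indexed by the GL enum value (GL_POINTS == 0 through GL_POLYGON == 9).
 */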
static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
   _3DPRIM_POINTLIST,
   _3DPRIM_LINELIST,
   _3DPRIM_LINELOOP,
   _3DPRIM_LINESTRIP,
   _3DPRIM_TRILIST,
   _3DPRIM_TRISTRIP,
   _3DPRIM_TRIFAN,
   _3DPRIM_QUADLIST,
   _3DPRIM_QUADSTRIP,
   _3DPRIM_POLYGON
};


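/* Reduce each mode to its basic primitive class (points, lines or
 * triangles), again indexed by the GL enum value; some derived state
 * only depends on this reduced class.
 */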
static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,
   GL_LINES,
   GL_LINES,
   GL_LINES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES
};


/* When the primitive changes, set a state bit and re-validate.  Not
 * the nicest; we would rather deal with this by having all the
 * programs be immune to the active primitive (i.e. cope with all
 * possibilities).  That may not be realistic, however.
 */
static GLuint brw_set_prim(struct brw_context *brw,
                           const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->intel.ctx;
   GLenum mode = prim->mode;

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      mode = GL_TRIANGLE_STRIP;

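   /* Likewise a single smooth-shaded, filled quad can be drawn as a
    * triangle fan instead of going through the GS quad-splitting path.
    */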
   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      mode = GL_TRIANGLE_FAN;
   }

   if (mode != brw->primitive) {
      brw->primitive = mode;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[mode] != brw->intel.reduced_primitive) {
         brw->intel.reduced_primitive = reduced_prim[mode];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }

   return prim_to_hw_prim[mode];
}

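/* Drop trailing vertices that cannot form a complete primitive: quads
 * come in groups of four vertices, and a quad strip needs at least
 * four vertices plus an even count thereafter.  For example,
 * trim(GL_QUADS, 7) == 4 and trim(GL_QUAD_STRIP, 5) == 4.
 */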
static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}


static void brw_emit_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim,
                          uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
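   /* Indexed draws fetch vertices through the index buffer (RANDOM
    * access), so the index buffer offset biases the start vertex and
    * the vertex buffer bias goes into the base vertex.  Non-indexed
    * draws fetch SEQUENTIAL vertices, so the bias folds directly into
    * the start vertex.
    */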
   if (prim->indexed) {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

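   /* Emit a single 3DPRIMITIVE packet: a header dword (length, topology
    * and vertex access type) followed by vertex count, start vertex,
    * instance count, start instance and base vertex, one dword each.
    */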
   BEGIN_BATCH(6);
   OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
             hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
             vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(1); // instance count
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}

static void brw_merge_inputs( struct brw_context *brw,
                              const struct gl_client_array *arrays[])
{
   struct brw_vertex_info old = brw->vb.info;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   memset(&brw->vb.info, 0, sizeof(brw->vb.info));

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;

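      /* Pack (Size - 1) into two bits per attribute, 16 attributes per
       * GLuint, so a change in any input's size shows up in the
       * memcmp() below.
       */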
      if (arrays[i]->StrideB != 0)
         brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
            ((i%16) * 2);
   }

   /* Raise statechanges if input sizes have changed. */
   if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
      brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;
}

/* XXX: could split the primitive list to fallback only on the
 * non-conformant primitives.
 */
static GLboolean check_fallbacks( struct brw_context *brw,
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims )
{
   struct gl_context *ctx = &brw->intel.ctx;
   GLuint i;

   /* If we don't require strict OpenGL conformance, never
    * use fallbacks.  If we're forcing fallbacks, always
    * use fallbacks.
    */
   if (brw->intel.conformance_mode == 0)
      return GL_FALSE;

   if (brw->intel.conformance_mode == 2)
      return GL_TRUE;

   if (ctx->Polygon.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (reduced_prim[prim[i].mode] == GL_TRIANGLES)
            return GL_TRUE;
   }

   /* BRW hardware will do AA lines, but they are non-conformant it
    * seems.  TBD whether we keep this fallback:
    */
   if (ctx->Line.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (reduced_prim[prim[i].mode] == GL_LINES)
            return GL_TRUE;
   }

   /* Stipple -- these fallbacks could be resolved with a little
    * bit of work?
    */
   if (ctx->Line.StippleFlag) {
      for (i = 0; i < nr_prims; i++) {
         /* GS doesn't get enough information to know when to reset
          * the stipple counter?!?
          */
         if (prim[i].mode == GL_LINE_LOOP || prim[i].mode == GL_LINE_STRIP)
            return GL_TRUE;

         if (prim[i].mode == GL_POLYGON &&
             (ctx->Polygon.FrontMode == GL_LINE ||
              ctx->Polygon.BackMode == GL_LINE))
            return GL_TRUE;
      }
   }

   if (ctx->Point.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (prim[i].mode == GL_POINTS)
            return GL_TRUE;
   }

   /* BRW hardware doesn't handle GL_CLAMP texturing correctly;
    * brw_wm_sampler_state:translate_wrap_mode() treats GL_CLAMP
    * as GL_CLAMP_TO_EDGE instead.  If we're using GL_CLAMP, and
    * we want strict conformance, force the fallback.
    * Right now, we only check 1D, 2D and 3D textures.
    */
   {
      int u;
      for (u = 0; u < ctx->Const.MaxTextureCoordUnits; u++) {
         struct gl_texture_unit *texUnit = &ctx->Texture.Unit[u];
         if (texUnit->Enabled) {
            if (texUnit->Enabled & TEXTURE_1D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_1D_INDEX]->Sampler.WrapS == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
            if (texUnit->Enabled & TEXTURE_2D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_2D_INDEX]->Sampler.WrapS == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_2D_INDEX]->Sampler.WrapT == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
            if (texUnit->Enabled & TEXTURE_3D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_3D_INDEX]->Sampler.WrapS == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_3D_INDEX]->Sampler.WrapT == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_3D_INDEX]->Sampler.WrapR == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
         }
      }
   }

   /* Nothing stopping us from the fast path now */
   return GL_FALSE;
}

/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static GLboolean brw_try_draw_prims( struct gl_context *ctx,
                                     const struct gl_client_array *arrays[],
                                     const struct _mesa_prim *prim,
                                     GLuint nr_prims,
                                     const struct _mesa_index_buffer *ib,
                                     GLuint min_index,
                                     GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   GLboolean retval = GL_FALSE;
   GLboolean warn = GL_FALSE;
   GLuint i;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   if (check_fallbacks(brw, prim, nr_prims))
      return GL_FALSE;

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   intel_prepare_render(intel);

   for (i = 0; i < nr_prims; i++) {
      uint32_t hw_prim;

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.  This fraction is just a guess (minimal full state plus
       * a primitive is around 512 bytes), and would be better if we had
       * an upper bound of how much we might emit in a single
       * brw_try_draw_prims().
       */
      intel_batchbuffer_require_space(intel, 1024, false);

      hw_prim = brw_set_prim(brw, &prim[i]);
      if (brw->state.dirty.brw) {
         brw_validate_state(brw);

         /* Various fallback checks: */
         if (brw->intel.Fallback)
            goto out;

         /* Check that we can fit our state in with our existing batchbuffer, or
          * flush otherwise.
          */
         if (dri_bufmgr_check_aperture_space(brw->state.validated_bos,
                                             brw->state.validated_bo_count)) {
            static GLboolean warned;
            intel_batchbuffer_flush(intel);

            /* Validate the state after we flushed the batch (which would have
             * changed the set of dirty state).  If we still fail to
             * check_aperture, warn of what's happening, but attempt to continue
             * on since it may succeed anyway, and the user would probably rather
             * see a failure and a warning than a fallback.
             */
            brw_validate_state(brw);
            if (!warned &&
                dri_bufmgr_check_aperture_space(brw->state.validated_bos,
                                                brw->state.validated_bo_count)) {
               warn = GL_TRUE;
               warned = GL_TRUE;
            }
         }

         intel->no_batch_wrap = GL_TRUE;
         brw_upload_state(brw);
      }

      brw_emit_prim(brw, &prim[i], hw_prim);

      intel->no_batch_wrap = GL_FALSE;

      retval = GL_TRUE;
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel);
 out:

   brw_state_cache_check_size(brw);

   if (warn)
      fprintf(stderr, "i965: Single primitive emit potentially exceeded "
              "available aperture space\n");

   if (!retval)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}

void brw_draw_prims( struct gl_context *ctx,
                     const struct gl_client_array *arrays[],
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index )
{
   GLboolean retval;

   if (!vbo_all_varyings_in_vbos(arrays)) {
      if (!index_bounds_valid)
         vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);

      /* Decide if we want to rebase.  If so we end up recursing once
       * only into this function.
       */
      if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
         vbo_rebase_prims(ctx, arrays,
                          prim, nr_prims,
                          ib, min_index, max_index,
                          brw_draw_prims );
         return;
      }
   }

   /* Make a first attempt at drawing:
    */
   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

   /* Otherwise, we really are out of memory.  Pass the drawing
    * command to the software tnl module, which will in turn call
    * swrast to do the drawing.
    */
   if (!retval) {
      _swsetup_Wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
   }

}

void brw_draw_init( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   int i;

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}

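/* Drop the buffer object references held by the vertex and index
 * buffer state so the BOs can eventually be freed.
 */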
void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}