Merge remote branch 'origin/master' into lp-setup-llvm
[mesa.git] / src / mesa / drivers / dri / i965 / brw_draw.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/glheader.h"
#include "main/context.h"
#include "main/state.h"
#include "main/enums.h"
#include "tnl/tnl.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"

#define FILE_DEBUG_FLAG DEBUG_BATCH

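/* Map each GL primitive mode (GL_POINTS .. GL_POLYGON) to the hardware
 * _3DPRIM_* topology code emitted in the 3D primitive command.
 */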
static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
   _3DPRIM_POINTLIST,
   _3DPRIM_LINELIST,
   _3DPRIM_LINELOOP,
   _3DPRIM_LINESTRIP,
   _3DPRIM_TRILIST,
   _3DPRIM_TRISTRIP,
   _3DPRIM_TRIFAN,
   _3DPRIM_QUADLIST,
   _3DPRIM_QUADSTRIP,
   _3DPRIM_POLYGON
};

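/* Reduce each GL primitive mode to the class it rasterizes as (points,
 * lines or triangles); state that only depends on this class is keyed
 * off the reduced primitive.
 */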
static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,
   GL_LINES,
   GL_LINES,
   GL_LINES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES
};

/* When the primitive changes, set a state bit and re-validate.  This is
 * not the nicest approach; we would rather have every program be immune
 * to the active primitive (i.e. cope with all possibilities), but that
 * may not be realistic.
 */
static GLuint brw_set_prim(struct brw_context *brw,
                           const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->intel.ctx;
   GLenum mode = prim->mode;

   if (INTEL_DEBUG & DEBUG_PRIMS)
      printf("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      mode = GL_TRIANGLE_STRIP;

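   /* Similarly, a single filled, non-flat-shaded quad can be drawn as a
    * triangle fan instead of going through the GS quad handling.
    */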
   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      mode = GL_TRIANGLE_FAN;
   }

   if (mode != brw->primitive) {
      brw->primitive = mode;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[mode] != brw->intel.reduced_primitive) {
         brw->intel.reduced_primitive = reduced_prim[mode];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }

   return prim_to_hw_prim[mode];
}


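/* Clamp the vertex count to something the selected topology can actually
 * consume: quads need a multiple of 4 vertices, quad strips an even count
 * of at least 4 (e.g. trim(GL_QUADS, 7) == 4, trim(GL_QUAD_STRIP, 3) == 0).
 */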
static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}


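/* Build the 3D primitive packet (CMD_3D_PRIM) for a single _mesa_prim and
 * write it into the batchbuffer.
 */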
static void brw_emit_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim,
                          uint32_t hw_prim)
{
   struct brw_3d_primitive prim_packet;
   struct intel_context *intel = &brw->intel;

   if (INTEL_DEBUG & DEBUG_PRIMS)
      printf("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
             prim->start, prim->count);

   prim_packet.header.opcode = CMD_3D_PRIM;
   prim_packet.header.length = sizeof(prim_packet)/4 - 2;
   prim_packet.header.pad = 0;
   prim_packet.header.topology = hw_prim;
   prim_packet.header.indexed = prim->indexed;

   prim_packet.verts_per_instance = trim(prim->mode, prim->count);
   prim_packet.start_vert_location = prim->start;
   if (prim->indexed)
      prim_packet.start_vert_location += brw->ib.start_vertex_offset;
   prim_packet.instance_count = 1;
   prim_packet.start_instance_location = 0;
   prim_packet.base_vert_location = prim->basevertex;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel->batch);
   }
   if (prim_packet.verts_per_instance) {
      intel_batchbuffer_data( brw->intel.batch, &prim_packet,
                              sizeof(prim_packet));
   }
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel->batch);
   }
}

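/* Latch the current vertex arrays into brw->vb and record each attribute's
 * size, flagging BRW_NEW_INPUT_DIMENSIONS if any size changed since the
 * previous draw.
 */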
static void brw_merge_inputs( struct brw_context *brw,
                              const struct gl_client_array *arrays[])
{
   struct brw_vertex_info old = brw->vb.info;
   GLuint i;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      drm_intel_bo_unreference(brw->vb.inputs[i].bo);

   memset(&brw->vb.inputs, 0, sizeof(brw->vb.inputs));
   memset(&brw->vb.info, 0, sizeof(brw->vb.info));

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;

      if (arrays[i]->StrideB != 0)
         brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
            ((i%16) * 2);
   }

   /* Raise state changes if input sizes have changed. */
   if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
      brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;
}

/* XXX: could split the primitive list to fall back only on the
 * non-conformant primitives.
 */
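/* Decide whether this set of primitives must take the software fallback
 * path to remain conformant.  Returns GL_TRUE if a fallback is required.
 */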
static GLboolean check_fallbacks( struct brw_context *brw,
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims )
{
   struct gl_context *ctx = &brw->intel.ctx;
   GLuint i;

   /* If we don't require strict OpenGL conformance, never
    * use fallbacks.  If we're forcing fallbacks, always
    * use fallbacks.
    */
   if (brw->intel.conformance_mode == 0)
      return GL_FALSE;

   if (brw->intel.conformance_mode == 2)
      return GL_TRUE;

   if (ctx->Polygon.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (reduced_prim[prim[i].mode] == GL_TRIANGLES)
            return GL_TRUE;
   }

   /* BRW hardware will do AA lines, but they appear to be non-conformant.
    * TBD whether we keep this fallback:
    */
   if (ctx->Line.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (reduced_prim[prim[i].mode] == GL_LINES)
            return GL_TRUE;
   }

   /* Stipple -- these fallbacks could be resolved with a little
    * bit of work?
    */
   if (ctx->Line.StippleFlag) {
      for (i = 0; i < nr_prims; i++) {
         /* GS doesn't get enough information to know when to reset
          * the stipple counter?!?
          */
         if (prim[i].mode == GL_LINE_LOOP || prim[i].mode == GL_LINE_STRIP)
            return GL_TRUE;

         if (prim[i].mode == GL_POLYGON &&
             (ctx->Polygon.FrontMode == GL_LINE ||
              ctx->Polygon.BackMode == GL_LINE))
            return GL_TRUE;
      }
   }

   if (ctx->Point.SmoothFlag) {
      for (i = 0; i < nr_prims; i++)
         if (prim[i].mode == GL_POINTS)
            return GL_TRUE;
   }

   /* BRW hardware doesn't handle GL_CLAMP texturing correctly;
    * brw_wm_sampler_state:translate_wrap_mode() treats GL_CLAMP
    * as GL_CLAMP_TO_EDGE instead.  If we're using GL_CLAMP, and
    * we want strict conformance, force the fallback.
    * Right now, we only do this for 1D, 2D and 3D textures.
    */
   {
      int u;
      for (u = 0; u < ctx->Const.MaxTextureCoordUnits; u++) {
         struct gl_texture_unit *texUnit = &ctx->Texture.Unit[u];
         if (texUnit->Enabled) {
            if (texUnit->Enabled & TEXTURE_1D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_1D_INDEX]->WrapS == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
            if (texUnit->Enabled & TEXTURE_2D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_2D_INDEX]->WrapS == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_2D_INDEX]->WrapT == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
            if (texUnit->Enabled & TEXTURE_3D_BIT) {
               if (texUnit->CurrentTex[TEXTURE_3D_INDEX]->WrapS == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_3D_INDEX]->WrapT == GL_CLAMP ||
                   texUnit->CurrentTex[TEXTURE_3D_INDEX]->WrapR == GL_CLAMP) {
                  return GL_TRUE;
               }
            }
         }
      }
   }

   /* Nothing stopping us from the fast path now */
   return GL_FALSE;
}

/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static GLboolean brw_try_draw_prims( struct gl_context *ctx,
                                     const struct gl_client_array *arrays[],
                                     const struct _mesa_prim *prim,
                                     GLuint nr_prims,
                                     const struct _mesa_index_buffer *ib,
                                     GLuint min_index,
                                     GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   GLboolean retval = GL_FALSE;
   GLboolean warn = GL_FALSE;
   GLboolean first_time = GL_TRUE;
   GLuint i;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   if (check_fallbacks(brw, prim, nr_prims))
      return GL_FALSE;

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so we can't access it earlier.
    */

   intel_prepare_render(intel);

   for (i = 0; i < nr_prims; i++) {
      uint32_t hw_prim;

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.  This fraction is just a guess (minimal full state plus
       * a primitive is around 512 bytes); it would be better if we had an
       * upper bound on how much we might emit in a single
       * brw_try_draw_prims().
       */
      intel_batchbuffer_require_space(intel->batch, intel->batch->size / 4);

      hw_prim = brw_set_prim(brw, &prim[i]);

      if (first_time || (brw->state.dirty.brw & BRW_NEW_PRIMITIVE)) {
         first_time = GL_FALSE;

         brw_validate_state(brw);

         /* Various fallback checks: */
         if (brw->intel.Fallback)
            goto out;

         /* Check that we can fit our state in with our existing batchbuffer, or
          * flush otherwise.
          */
         if (dri_bufmgr_check_aperture_space(brw->state.validated_bos,
                                             brw->state.validated_bo_count)) {
            static GLboolean warned;
            intel_batchbuffer_flush(intel->batch);

            /* Validate the state after we flushed the batch (which would have
             * changed the set of dirty state).  If we still fail to
             * check_aperture, warn of what's happening, but attempt to continue
             * on since it may succeed anyway, and the user would probably rather
             * see a failure and a warning than a fallback.
             */
            brw_validate_state(brw);
            if (!warned &&
                dri_bufmgr_check_aperture_space(brw->state.validated_bos,
                                                brw->state.validated_bo_count)) {
               warn = GL_TRUE;
               warned = GL_TRUE;
            }
         }

         intel->no_batch_wrap = GL_TRUE;
         brw_upload_state(brw);
      }

      brw_emit_prim(brw, &prim[i], hw_prim);

      intel->no_batch_wrap = GL_FALSE;

      retval = GL_TRUE;
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel->batch);
 out:

   brw_state_cache_check_size(brw);

   if (warn)
      fprintf(stderr, "i965: Single primitive emit potentially exceeded "
              "available aperture space\n");

   if (!retval)
      DBG("%s failed\n", __FUNCTION__);

   return retval;
}

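/* The draw entry point registered with the vbo module: try the hardware
 * path first, and hand the primitives to the software tnl/swrast path if
 * that fails.
 */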
void brw_draw_prims( struct gl_context *ctx,
                     const struct gl_client_array *arrays[],
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index )
{
   GLboolean retval;

   if (!vbo_all_varyings_in_vbos(arrays)) {
      if (!index_bounds_valid)
         vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);

      /* Decide if we want to rebase.  If so, we end up recursing back
       * into this function, but only once.
       */
      if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
         vbo_rebase_prims(ctx, arrays,
                          prim, nr_prims,
                          ib, min_index, max_index,
                          brw_draw_prims );
         return;
      }
   }

   /* Make a first attempt at drawing:
    */
   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

   /* If that failed, we really are out of memory.  Pass the drawing
    * command to the software tnl module, which will in turn call
    * swrast to do the drawing.
    */
   if (!retval) {
      _swsetup_Wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
   }

}

void brw_draw_init( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct vbo_context *vbo = vbo_context(ctx);

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;
}

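/* Release the buffer objects still referenced by the vertex and index
 * buffer state when the context is torn down.
 */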
void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   if (brw->vb.upload.bo != NULL) {
      drm_intel_bo_unreference(brw->vb.upload.bo);
      brw->vb.upload.bo = NULL;
   }

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      drm_intel_bo_unreference(brw->vb.inputs[i].bo);
      brw->vb.inputs[i].bo = NULL;
   }

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}