i965: Don't bother telling tnl about state updates unless we fall back.
[mesa.git] / src / mesa / drivers / dri / i915 / i830_vtbl.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "i830_context.h"
#include "i830_reg.h"
#include "intel_batchbuffer.h"
#include "intel_regions.h"
#include "intel_tris.h"
#include "intel_fbo.h"
#include "tnl/tnl.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"

#define FILE_DEBUG_FLAG DEBUG_STATE

static GLboolean i830_check_vertex_size(struct intel_context *intel,
                                        GLuint expected);

#define SZ_TO_HW(sz)  ((sz - 2) & 0x3)
#define EMIT_SZ(sz)   (EMIT_1F + (sz) - 1)

#define EMIT_ATTR(ATTR, STYLE, V0)                                        \
   do {                                                                   \
      intel->vertex_attrs[intel->vertex_attr_count].attrib = (ATTR);      \
      intel->vertex_attrs[intel->vertex_attr_count].format = (STYLE);     \
      intel->vertex_attr_count++;                                         \
      v0 |= V0;                                                           \
   } while (0)

#define EMIT_PAD(N)                                                       \
   do {                                                                   \
      intel->vertex_attrs[intel->vertex_attr_count].attrib = 0;           \
      intel->vertex_attrs[intel->vertex_attr_count].format = EMIT_PAD;    \
      intel->vertex_attrs[intel->vertex_attr_count].offset = (N);         \
      intel->vertex_attr_count++;                                         \
   } while (0)

#define VRTX_TEX_SET_FMT(n, x)  ((x) << ((n) * 2))
#define TEXBIND_SET(n, x)       ((x) << ((n) * 4))

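/* i830 has no prevalidation work to do before rendering, so this hook
 * is a no-op.
 */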
static void
i830_render_prevalidate(struct intel_context *intel)
{
}

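/* Choose the hardware vertex layout for the current TNL render inputs
 * and, if the resulting vertex format differs from the one already
 * programmed, install the new attribute list with t_vertex.c.
 */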
static void
i830_render_start(struct intel_context *intel)
{
   struct gl_context *ctx = &intel->ctx;
   struct i830_context *i830 = i830_context(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   DECLARE_RENDERINPUTS(index_bitset);
   GLuint v0 = _3DSTATE_VFT0_CMD;
   GLuint v2 = _3DSTATE_VFT1_CMD;
   GLuint mcsb1 = 0;

   RENDERINPUTS_COPY(index_bitset, tnl->render_inputs_bitset);

   /* Important: the position attribute must point at the NDC
    * coordinates computed by TNL:
    */
   VB->AttribPtr[VERT_ATTRIB_POS] = VB->NdcPtr;
   intel->vertex_attr_count = 0;

   /* EMIT_ATTR's must be in order as they tell t_vertex.c how to
    * build up a hardware vertex.
    */
   if (RENDERINPUTS_TEST_RANGE(index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX)) {
      EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_4F_VIEWPORT, VFT0_XYZW);
      intel->coloroffset = 4;
   }
   else {
      EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_3F_VIEWPORT, VFT0_XYZ);
      intel->coloroffset = 3;
   }

   if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_POINTSIZE)) {
      EMIT_ATTR(_TNL_ATTRIB_POINTSIZE, EMIT_1F, VFT0_POINT_WIDTH);
   }

   EMIT_ATTR(_TNL_ATTRIB_COLOR0, EMIT_4UB_4F_BGRA, VFT0_DIFFUSE);

   intel->specoffset = 0;
   if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_COLOR1) ||
       RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_FOG)) {
      if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_COLOR1)) {
         intel->specoffset = intel->coloroffset + 1;
         EMIT_ATTR(_TNL_ATTRIB_COLOR1, EMIT_3UB_3F_BGR, VFT0_SPEC);
      }
      else
         EMIT_PAD(3);

      if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_FOG))
         EMIT_ATTR(_TNL_ATTRIB_FOG, EMIT_1UB_1F, VFT0_SPEC);
      else
         EMIT_PAD(1);
   }

   if (RENDERINPUTS_TEST_RANGE(index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX)) {
      int i, count = 0;

      for (i = 0; i < I830_TEX_UNITS; i++) {
         if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_TEX(i))) {
            GLuint sz = VB->AttribPtr[_TNL_ATTRIB_TEX0 + i]->size;
            GLuint emit;
            GLuint mcs = (i830->state.Tex[i][I830_TEXREG_MCS] &
                          ~TEXCOORDTYPE_MASK);

            switch (sz) {
            case 1:
            case 2:
               emit = EMIT_2F;
               sz = 2;
               mcs |= TEXCOORDTYPE_CARTESIAN;
               break;
            case 3:
               emit = EMIT_3F;
               sz = 3;
               mcs |= TEXCOORDTYPE_VECTOR;
               break;
            case 4:
               emit = EMIT_3F_XYW;
               sz = 3;
               mcs |= TEXCOORDTYPE_HOMOGENEOUS;
               break;
            default:
               continue;
            }

            EMIT_ATTR(_TNL_ATTRIB_TEX0 + i, emit, 0);
            v2 |= VRTX_TEX_SET_FMT(count, SZ_TO_HW(sz));
            mcsb1 |= (count + 8) << (i * 4);

            if (mcs != i830->state.Tex[i][I830_TEXREG_MCS]) {
               I830_STATECHANGE(i830, I830_UPLOAD_TEX(i));
               i830->state.Tex[i][I830_TEXREG_MCS] = mcs;
            }

            count++;
         }
      }

      v0 |= VFT0_TEX_COUNT(count);
   }

   /* Only need to change the vertex emit code if there has been a
    * statechange to a new hardware vertex format:
    */
   if (v0 != i830->state.Ctx[I830_CTXREG_VF] ||
       v2 != i830->state.Ctx[I830_CTXREG_VF2] ||
       mcsb1 != i830->state.Ctx[I830_CTXREG_MCSB1] ||
       !RENDERINPUTS_EQUAL(index_bitset, i830->last_index_bitset)) {
      int k;

      I830_STATECHANGE(i830, I830_UPLOAD_CTX);

      /* Must do this *after* statechange, so as not to affect
       * buffered vertices reliant on the old state:
       */
      intel->vertex_size =
         _tnl_install_attrs(ctx,
                            intel->vertex_attrs,
                            intel->vertex_attr_count,
                            intel->ViewportMatrix.m, 0);

      intel->vertex_size >>= 2;

      i830->state.Ctx[I830_CTXREG_VF] = v0;
      i830->state.Ctx[I830_CTXREG_VF2] = v2;
      i830->state.Ctx[I830_CTXREG_MCSB1] = mcsb1;
      RENDERINPUTS_COPY(i830->last_index_bitset, index_bitset);

      k = i830_check_vertex_size(intel, intel->vertex_size);
      assert(k);
   }
}

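/* Polygon stipple is only honoured when the reduced primitive is
 * GL_TRIANGLES; flush any buffered vertices before the stipple enable
 * bit changes.
 */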
static void
i830_reduced_primitive_state(struct intel_context *intel, GLenum rprim)
{
   struct i830_context *i830 = i830_context(&intel->ctx);
   GLuint st1 = i830->state.Stipple[I830_STPREG_ST1];

   st1 &= ~ST1_ENABLE;

   switch (rprim) {
   case GL_TRIANGLES:
      if (intel->ctx.Polygon.StippleFlag && intel->hw_stipple)
         st1 |= ST1_ENABLE;
      break;
   case GL_LINES:
   case GL_POINTS:
   default:
      break;
   }

   i830->intel.reduced_primitive = rprim;

   if (st1 != i830->state.Stipple[I830_STPREG_ST1]) {
      INTEL_FIREVERTICES(intel);

      I830_STATECHANGE(i830, I830_UPLOAD_STIPPLE);
      i830->state.Stipple[I830_STPREG_ST1] = st1;
   }
}

/* Pull apart the vertex format registers and figure out how large a
 * vertex is supposed to be.
 */
static GLboolean
i830_check_vertex_size(struct intel_context *intel, GLuint expected)
{
   struct i830_context *i830 = i830_context(&intel->ctx);
   int vft0 = i830->state.Ctx[I830_CTXREG_VF];
   int vft1 = i830->state.Ctx[I830_CTXREG_VF2];
   int nrtex = (vft0 & VFT0_TEX_COUNT_MASK) >> VFT0_TEX_COUNT_SHIFT;
   int i, sz = 0;

   switch (vft0 & VFT0_XYZW_MASK) {
   case VFT0_XY:
      sz = 2;
      break;
   case VFT0_XYZ:
      sz = 3;
      break;
   case VFT0_XYW:
      sz = 3;
      break;
   case VFT0_XYZW:
      sz = 4;
      break;
   default:
      fprintf(stderr, "no xyzw specified\n");
      return 0;
   }

   if (vft0 & VFT0_SPEC)
      sz++;
   if (vft0 & VFT0_DIFFUSE)
      sz++;
   if (vft0 & VFT0_DEPTH_OFFSET)
      sz++;
   if (vft0 & VFT0_POINT_WIDTH)
      sz++;

   for (i = 0; i < nrtex; i++) {
      switch (vft1 & VFT1_TEX0_MASK) {
      case TEXCOORDFMT_2D:
         sz += 2;
         break;
      case TEXCOORDFMT_3D:
         sz += 3;
         break;
      case TEXCOORDFMT_4D:
         sz += 4;
         break;
      case TEXCOORDFMT_1D:
         sz += 1;
         break;
      }
      vft1 >>= VFT1_TEX1_SHIFT;
   }

   if (sz != expected)
      fprintf(stderr, "vertex size mismatch %d/%d\n", sz, expected);

   return sz == expected;
}

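/* Emit the packets for state the driver never varies: default vertex
 * colors, fog mode, per-unit texture stream and coordinate mappings,
 * vertex transform setup, W state and the color factor.
 */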
static void
i830_emit_invarient_state(struct intel_context *intel)
{
   BATCH_LOCALS;

   BEGIN_BATCH(29);

   OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
   OUT_BATCH(0);

   OUT_BATCH(_3DSTATE_DFLT_SPEC_CMD);
   OUT_BATCH(0);

   OUT_BATCH(_3DSTATE_DFLT_Z_CMD);
   OUT_BATCH(0);

   OUT_BATCH(_3DSTATE_FOG_MODE_CMD);
   OUT_BATCH(FOGFUNC_ENABLE |
             FOG_LINEAR_CONST | FOGSRC_INDEX_Z | ENABLE_FOG_DENSITY);
   OUT_BATCH(0);
   OUT_BATCH(0);

   OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
             MAP_UNIT(0) |
             DISABLE_TEX_STREAM_BUMP |
             ENABLE_TEX_STREAM_COORD_SET |
             TEX_STREAM_COORD_SET(0) |
             ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(0));
   OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
             MAP_UNIT(1) |
             DISABLE_TEX_STREAM_BUMP |
             ENABLE_TEX_STREAM_COORD_SET |
             TEX_STREAM_COORD_SET(1) |
             ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(1));
   OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
             MAP_UNIT(2) |
             DISABLE_TEX_STREAM_BUMP |
             ENABLE_TEX_STREAM_COORD_SET |
             TEX_STREAM_COORD_SET(2) |
             ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(2));
   OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
             MAP_UNIT(3) |
             DISABLE_TEX_STREAM_BUMP |
             ENABLE_TEX_STREAM_COORD_SET |
             TEX_STREAM_COORD_SET(3) |
             ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(3));

   OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
   OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(0));
   OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
   OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(1));
   OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
   OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(2));
   OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
   OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(3));

   OUT_BATCH(_3DSTATE_VERTEX_TRANSFORM);
   OUT_BATCH(DISABLE_VIEWPORT_TRANSFORM | DISABLE_PERSPECTIVE_DIVIDE);

   OUT_BATCH(_3DSTATE_W_STATE_CMD);
   OUT_BATCH(MAGIC_W_STATE_DWORD1);
   OUT_BATCH(0x3f800000 /* 1.0 in IEEE float */ );

   OUT_BATCH(_3DSTATE_COLOR_FACTOR_CMD);
   OUT_BATCH(0x80808080);       /* .5 required in alpha for GL_DOT3_RGBA_EXT */

   ADVANCE_BATCH();
}


#define emit(intel, state, size) \
   intel_batchbuffer_data(intel, state, size, false)

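/* State groups that are flagged active but have not yet been written
 * into the current batchbuffer.
 */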
static GLuint
get_dirty(struct i830_hw_state *state)
{
   return state->active & ~state->emitted;
}

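/* Upper-bound estimate, in bytes, of the batchbuffer space needed to
 * emit the currently dirty state.
 */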
static GLuint
get_state_size(struct i830_hw_state *state)
{
   GLuint dirty = get_dirty(state);
   GLuint sz = 0;
   GLuint i;

   if (dirty & I830_UPLOAD_INVARIENT)
      sz += 40 * sizeof(int);

   if (dirty & I830_UPLOAD_RASTER_RULES)
      sz += sizeof(state->RasterRules);

   if (dirty & I830_UPLOAD_CTX)
      sz += sizeof(state->Ctx);

   if (dirty & I830_UPLOAD_BUFFERS)
      sz += sizeof(state->Buffer);

   if (dirty & I830_UPLOAD_STIPPLE)
      sz += sizeof(state->Stipple);

   for (i = 0; i < I830_TEX_UNITS; i++) {
      if ((dirty & I830_UPLOAD_TEX(i)))
         sz += sizeof(state->Tex[i]);

      if (dirty & I830_UPLOAD_TEXBLEND(i))
         sz += state->TexBlendWordsUsed[i] * 4;
   }

   return sz;
}

/* Push the state into the sarea and/or texture memory.
 */
static void
i830_emit_state(struct intel_context *intel)
{
   struct i830_context *i830 = i830_context(&intel->ctx);
   struct i830_hw_state *state = &i830->state;
   int i, count;
   GLuint dirty;
   drm_intel_bo *aper_array[3 + I830_TEX_UNITS];
   int aper_count;
   GET_CURRENT_CONTEXT(ctx);
   BATCH_LOCALS;

   /* We don't hold the lock at this point, so want to make sure that
    * there won't be a buffer wrap between the state emits and the primitive
    * emit header.
    *
    * It might be better to talk about explicit places where
    * scheduling is allowed, rather than assume that it is whenever a
    * batchbuffer fills up.
    */
   intel_batchbuffer_require_space(intel,
                                   get_state_size(state) +
                                   INTEL_PRIM_EMIT_SIZE, false);
   count = 0;
 again:
   aper_count = 0;
   dirty = get_dirty(state);

   aper_array[aper_count++] = intel->batch.bo;
   if (dirty & I830_UPLOAD_BUFFERS) {
      aper_array[aper_count++] = state->draw_region->buffer;
      if (state->depth_region)
         aper_array[aper_count++] = state->depth_region->buffer;
   }

   for (i = 0; i < I830_TEX_UNITS; i++)
      if (dirty & I830_UPLOAD_TEX(i)) {
         if (state->tex_buffer[i]) {
            aper_array[aper_count++] = state->tex_buffer[i];
         }
      }

   if (dri_bufmgr_check_aperture_space(aper_array, aper_count)) {
      if (count == 0) {
         count++;
         intel_batchbuffer_flush(intel);
         goto again;
      } else {
         _mesa_error(ctx, GL_OUT_OF_MEMORY, "i830 emit state");
         assert(0);
      }
   }

   /* Do this here as we may have flushed the batchbuffer above,
    * causing more state to be dirty!
    */
   dirty = get_dirty(state);
   state->emitted |= dirty;
   assert(get_dirty(state) == 0);

   if (dirty & I830_UPLOAD_INVARIENT) {
      DBG("I830_UPLOAD_INVARIENT:\n");
      i830_emit_invarient_state(intel);
   }

   if (dirty & I830_UPLOAD_RASTER_RULES) {
      DBG("I830_UPLOAD_RASTER_RULES:\n");
      emit(intel, state->RasterRules, sizeof(state->RasterRules));
   }

   if (dirty & I830_UPLOAD_CTX) {
      DBG("I830_UPLOAD_CTX:\n");
      emit(intel, state->Ctx, sizeof(state->Ctx));
   }

   if (dirty & I830_UPLOAD_BUFFERS) {
      GLuint count = 15;

      DBG("I830_UPLOAD_BUFFERS:\n");

      if (state->depth_region)
         count += 3;

      BEGIN_BATCH(count);
      OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR0]);
      OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR1]);
      OUT_RELOC(state->draw_region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);

      if (state->depth_region) {
         OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR0]);
         OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR1]);
         OUT_RELOC(state->depth_region->buffer,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
      }

      OUT_BATCH(state->Buffer[I830_DESTREG_DV0]);
      OUT_BATCH(state->Buffer[I830_DESTREG_DV1]);
      OUT_BATCH(state->Buffer[I830_DESTREG_SENABLE]);
      OUT_BATCH(state->Buffer[I830_DESTREG_SR0]);
      OUT_BATCH(state->Buffer[I830_DESTREG_SR1]);
      OUT_BATCH(state->Buffer[I830_DESTREG_SR2]);

      assert(state->Buffer[I830_DESTREG_DRAWRECT0] != MI_NOOP);
      OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT0]);
      OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT1]);
      OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT2]);
      OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT3]);
      OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT4]);
      OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT5]);
      ADVANCE_BATCH();
   }

   if (dirty & I830_UPLOAD_STIPPLE) {
      DBG("I830_UPLOAD_STIPPLE:\n");
      emit(intel, state->Stipple, sizeof(state->Stipple));
   }

   for (i = 0; i < I830_TEX_UNITS; i++) {
      if ((dirty & I830_UPLOAD_TEX(i))) {
         DBG("I830_UPLOAD_TEX(%d):\n", i);

         BEGIN_BATCH(I830_TEX_SETUP_SIZE + 1);
         OUT_BATCH(state->Tex[i][I830_TEXREG_TM0LI]);

         OUT_RELOC(state->tex_buffer[i],
                   I915_GEM_DOMAIN_SAMPLER, 0,
                   state->tex_offset[i]);

         OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S1]);
         OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S2]);
         OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S3]);
         OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S4]);
         OUT_BATCH(state->Tex[i][I830_TEXREG_MCS]);
         OUT_BATCH(state->Tex[i][I830_TEXREG_CUBE]);

         ADVANCE_BATCH();
      }

      if (dirty & I830_UPLOAD_TEXBLEND(i)) {
         DBG("I830_UPLOAD_TEXBLEND(%d): %d words\n", i,
             state->TexBlendWordsUsed[i]);
         emit(intel, state->TexBlend[i], state->TexBlendWordsUsed[i] * 4);
      }
   }

   assert(get_dirty(state) == 0);
}

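/* Drop the references held by the i830 state (draw/depth regions and
 * per-unit texture buffers) and free the TNL vertex storage.
 */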
static void
i830_destroy_context(struct intel_context *intel)
{
   GLuint i;
   struct i830_context *i830 = i830_context(&intel->ctx);

   intel_region_release(&i830->state.draw_region);
   intel_region_release(&i830->state.depth_region);

   for (i = 0; i < I830_TEX_UNITS; i++) {
      if (i830->state.tex_buffer[i] != NULL) {
         drm_intel_bo_unreference(i830->state.tex_buffer[i]);
         i830->state.tex_buffer[i] = NULL;
      }
   }

   _tnl_free_vertices(&intel->ctx);
}

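/* DV_PF_* color buffer format for each renderable gl_format; entries
 * left at zero are not supported as render targets.
 */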
static uint32_t i830_render_target_format_for_mesa_format[MESA_FORMAT_COUNT] =
{
   [MESA_FORMAT_ARGB8888] = DV_PF_8888,
   [MESA_FORMAT_XRGB8888] = DV_PF_8888,
   [MESA_FORMAT_RGB565] = DV_PF_565,
   [MESA_FORMAT_ARGB1555] = DV_PF_1555,
   [MESA_FORMAT_ARGB4444] = DV_PF_4444,
};

static bool
i830_render_target_supported(gl_format format)
{
   if (format == MESA_FORMAT_S8_Z24 ||
       format == MESA_FORMAT_X8_Z24 ||
       format == MESA_FORMAT_Z16) {
      return true;
   }

   return i830_render_target_format_for_mesa_format[format] != 0;
}

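/* Record the new color and depth regions and rebuild the destination
 * buffer state (buffer addresses, DV1 pixel format, drawing rectangle)
 * that depends on them.
 */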
static void
i830_set_draw_region(struct intel_context *intel,
                     struct intel_region *color_regions[],
                     struct intel_region *depth_region,
                     GLuint num_regions)
{
   struct i830_context *i830 = i830_context(&intel->ctx);
   struct gl_context *ctx = &intel->ctx;
   struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[0];
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_renderbuffer *drb;
   struct intel_renderbuffer *idrb = NULL;
   GLuint value;
   struct i830_hw_state *state = &i830->state;
   uint32_t draw_x, draw_y;

   if (state->draw_region != color_regions[0]) {
      intel_region_release(&state->draw_region);
      intel_region_reference(&state->draw_region, color_regions[0]);
   }
   if (state->depth_region != depth_region) {
      intel_region_release(&state->depth_region);
      intel_region_reference(&state->depth_region, depth_region);
   }

   /*
    * Set stride/cpp values
    */
   i915_set_buf_info_for_region(&state->Buffer[I830_DESTREG_CBUFADDR0],
                                color_regions[0], BUF_3D_ID_COLOR_BACK);

   i915_set_buf_info_for_region(&state->Buffer[I830_DESTREG_DBUFADDR0],
                                depth_region, BUF_3D_ID_DEPTH);

   /*
    * Compute/set I830_DESTREG_DV1 value
    */
   value = (DSTORG_HORT_BIAS(0x8) |     /* .5 */
            DSTORG_VERT_BIAS(0x8) | DEPTH_IS_Z);        /* .5 */

   if (irb != NULL) {
      value |= i830_render_target_format_for_mesa_format[irb->Base.Format];
   }

   if (depth_region && depth_region->cpp == 4) {
      value |= DEPTH_FRMT_24_FIXED_8_OTHER;
   }
   else {
      value |= DEPTH_FRMT_16_FIXED;
   }
   state->Buffer[I830_DESTREG_DV1] = value;

   drb = ctx->DrawBuffer->Attachment[BUFFER_DEPTH].Renderbuffer;
   if (!drb)
      drb = ctx->DrawBuffer->Attachment[BUFFER_STENCIL].Renderbuffer;

   if (drb)
      idrb = intel_renderbuffer(drb);

   /* We set up the drawing rectangle to be offset into the color
    * region's location in the miptree.  If it doesn't match with
    * depth's offsets, we can't render to it.
    *
    * (Well, not actually true -- the hw grew a bit to let depth's
    * offset get forced to 0,0.  We may want to use that if people are
    * hitting that case.  Also, some configurations may be supportable
    * by tweaking the start offset of the buffers around, which we
    * can't do in general due to tiling)
    */
   FALLBACK(intel, I830_FALLBACK_DRAW_OFFSET,
            idrb && irb && (idrb->draw_x != irb->draw_x ||
                            idrb->draw_y != irb->draw_y));

   if (irb) {
      draw_x = irb->draw_x;
      draw_y = irb->draw_y;
   } else if (idrb) {
      draw_x = idrb->draw_x;
      draw_y = idrb->draw_y;
   } else {
      draw_x = 0;
      draw_y = 0;
   }

   state->Buffer[I830_DESTREG_DRAWRECT0] = _3DSTATE_DRAWRECT_INFO;
   state->Buffer[I830_DESTREG_DRAWRECT1] = 0;
   state->Buffer[I830_DESTREG_DRAWRECT2] = (draw_y << 16) | draw_x;
   state->Buffer[I830_DESTREG_DRAWRECT3] =
      ((ctx->DrawBuffer->Width + draw_x) & 0xffff) |
      ((ctx->DrawBuffer->Height + draw_y) << 16);
   state->Buffer[I830_DESTREG_DRAWRECT4] = (draw_y << 16) | draw_x;
   state->Buffer[I830_DESTREG_DRAWRECT5] = MI_NOOP;

   I830_STATECHANGE(i830, I830_UPLOAD_BUFFERS);
}

/* This isn't really handled at the moment.
 */
static void
i830_new_batch(struct intel_context *intel)
{
   struct i830_context *i830 = i830_context(&intel->ctx);
   i830->state.emitted = 0;
}

static void
i830_assert_not_dirty(struct intel_context *intel)
{
   struct i830_context *i830 = i830_context(&intel->ctx);
   assert(!get_dirty(&i830->state));
   (void) i830;
}

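/* Pass GL state changes on to the TNL modules and recompute the
 * provoking vertex when the lighting state changes.
 */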
static void
i830_invalidate_state(struct intel_context *intel, GLuint new_state)
{
   struct gl_context *ctx = &intel->ctx;

   _tnl_InvalidateState(ctx, new_state);
   _tnl_invalidate_vertex_state(ctx, new_state);

   if (new_state & _NEW_LIGHT)
      i830_update_provoking_vertex(&intel->ctx);
}

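/* Hook the i830-specific implementations into the shared intel vtbl. */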
void
i830InitVtbl(struct i830_context *i830)
{
   i830->intel.vtbl.check_vertex_size = i830_check_vertex_size;
   i830->intel.vtbl.destroy = i830_destroy_context;
   i830->intel.vtbl.emit_state = i830_emit_state;
   i830->intel.vtbl.new_batch = i830_new_batch;
   i830->intel.vtbl.reduced_primitive_state = i830_reduced_primitive_state;
   i830->intel.vtbl.set_draw_region = i830_set_draw_region;
   i830->intel.vtbl.update_texture_state = i830UpdateTextureState;
   i830->intel.vtbl.render_start = i830_render_start;
   i830->intel.vtbl.render_prevalidate = i830_render_prevalidate;
   i830->intel.vtbl.assert_not_dirty = i830_assert_not_dirty;
   i830->intel.vtbl.finish_batch = intel_finish_vb;
   i830->intel.vtbl.invalidate_state = i830_invalidate_state;
   i830->intel.vtbl.render_target_supported = i830_render_target_supported;
}