i915: Fix build since hiz merge.
[mesa.git] / src/mesa/drivers/dri/i915/i830_vtbl.c
1 /**************************************************************************
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "i830_context.h"
29 #include "i830_reg.h"
30 #include "intel_batchbuffer.h"
31 #include "intel_mipmap_tree.h"
32 #include "intel_regions.h"
33 #include "intel_tris.h"
34 #include "intel_fbo.h"
35 #include "intel_buffers.h"
36 #include "tnl/tnl.h"
37 #include "tnl/t_context.h"
38 #include "tnl/t_vertex.h"
39 #include "swrast_setup/swrast_setup.h"
40 #include "main/renderbuffer.h"
41 #include "main/framebuffer.h"
42
43 #define FILE_DEBUG_FLAG DEBUG_STATE
44
45 static bool i830_check_vertex_size(struct intel_context *intel,
46 GLuint expected);
47
48 #define SZ_TO_HW(sz) (((sz) - 2) & 0x3)
49 #define EMIT_SZ(sz) (EMIT_1F + (sz) - 1)
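/* SZ_TO_HW maps the emitted texcoord component count (2..4) onto the two-bit
 * TEXCOORDFMT field packed into _3DSTATE_VFT1, matching the decode in
 * i830_check_vertex_size() below; EMIT_SZ relies on the tnl EMIT_1F..EMIT_4F
 * enum values being consecutive.
 */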
50 #define EMIT_ATTR( ATTR, STYLE, V0 ) \
51 do { \
52 intel->vertex_attrs[intel->vertex_attr_count].attrib = (ATTR); \
53 intel->vertex_attrs[intel->vertex_attr_count].format = (STYLE); \
54 intel->vertex_attr_count++; \
55 v0 |= V0; \
56 } while (0)
57
58 #define EMIT_PAD( N ) \
59 do { \
60 intel->vertex_attrs[intel->vertex_attr_count].attrib = 0; \
61 intel->vertex_attrs[intel->vertex_attr_count].format = EMIT_PAD; \
62 intel->vertex_attrs[intel->vertex_attr_count].offset = (N); \
63 intel->vertex_attr_count++; \
64 } while (0)
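/* Inside EMIT_PAD the bare token EMIT_PAD is not re-expanded (a function-like
 * macro only expands when followed by parentheses), so the format field gets
 * the tnl attribute-format enumerator EMIT_PAD from the tnl headers included
 * above, i.e. N bytes of padding in the hardware vertex.
 */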
65
66
67 #define VRTX_TEX_SET_FMT(n, x) ((x)<<((n)*2))
68 #define TEXBIND_SET(n, x) ((x)<<((n)*4))
69
70 static void
71 i830_render_prevalidate(struct intel_context *intel)
72 {
73 }
74
75 static void
76 i830_render_start(struct intel_context *intel)
77 {
78 struct gl_context *ctx = &intel->ctx;
79 struct i830_context *i830 = i830_context(ctx);
80 TNLcontext *tnl = TNL_CONTEXT(ctx);
81 struct vertex_buffer *VB = &tnl->vb;
82 DECLARE_RENDERINPUTS(index_bitset);
83 GLuint v0 = _3DSTATE_VFT0_CMD;
84 GLuint v2 = _3DSTATE_VFT1_CMD;
85 GLuint mcsb1 = 0;
86
87 RENDERINPUTS_COPY(index_bitset, tnl->render_inputs_bitset);
88
89 /* Important: position must point at the post-projection NDC coordinates so
90 * the EMIT_*F_VIEWPORT formats below can apply the viewport transform. */
91 VB->AttribPtr[VERT_ATTRIB_POS] = VB->NdcPtr;
92 intel->vertex_attr_count = 0;
93
94 /* EMIT_ATTR's must be in order as they tell t_vertex.c how to
95 * build up a hardware vertex.
96 */
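/* For example, position plus diffuse color plus one 2D texcoord gives a
 * hardware vertex of x,y,z,w (position becomes 4 floats whenever texcoords
 * are enabled), one packed BGRA dword, then s0,t0, with v0 carrying
 * VFT0_XYZW | VFT0_DIFFUSE | VFT0_TEX_COUNT(1).
 */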
97 if (RENDERINPUTS_TEST_RANGE(index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX)) {
98 EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_4F_VIEWPORT, VFT0_XYZW);
99 intel->coloroffset = 4;
100 }
101 else {
102 EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_3F_VIEWPORT, VFT0_XYZ);
103 intel->coloroffset = 3;
104 }
105
106 if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_POINTSIZE)) {
107 EMIT_ATTR(_TNL_ATTRIB_POINTSIZE, EMIT_1F, VFT0_POINT_WIDTH);
108 }
109
110 EMIT_ATTR(_TNL_ATTRIB_COLOR0, EMIT_4UB_4F_BGRA, VFT0_DIFFUSE);
111
112 intel->specoffset = 0;
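/* Specular color and the fog factor share a single dword of the hardware
 * vertex (3 bytes of BGR specular plus 1 byte of fog), so a pad is emitted
 * below whenever only one of the two is present.
 */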
113 if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_COLOR1) ||
114 RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_FOG)) {
115 if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_COLOR1)) {
116 intel->specoffset = intel->coloroffset + 1;
117 EMIT_ATTR(_TNL_ATTRIB_COLOR1, EMIT_3UB_3F_BGR, VFT0_SPEC);
118 }
119 else
120 EMIT_PAD(3);
121
122 if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_FOG))
123 EMIT_ATTR(_TNL_ATTRIB_FOG, EMIT_1UB_1F, VFT0_SPEC);
124 else
125 EMIT_PAD(1);
126 }
127
128 if (RENDERINPUTS_TEST_RANGE(index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX)) {
129 int i, count = 0;
130
131 for (i = 0; i < I830_TEX_UNITS; i++) {
132 if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_TEX(i))) {
133 GLuint sz = VB->AttribPtr[_TNL_ATTRIB_TEX0 + i]->size;
134 GLuint emit;
135 GLuint mcs = (i830->state.Tex[i][I830_TEXREG_MCS] &
136 ~TEXCOORDTYPE_MASK);
137
138 switch (sz) {
139 case 1:
140 case 2:
141 emit = EMIT_2F;
142 sz = 2;
143 mcs |= TEXCOORDTYPE_CARTESIAN;
144 break;
145 case 3:
146 emit = EMIT_3F;
147 sz = 3;
148 mcs |= TEXCOORDTYPE_VECTOR;
149 break;
150 case 4:
151 emit = EMIT_3F_XYW;
152 sz = 3;
153 mcs |= TEXCOORDTYPE_HOMOGENEOUS;
154 break;
155 default:
156 continue;
157 }
158
159
160 EMIT_ATTR(_TNL_ATTRIB_TEX0 + i, emit, 0);
161 v2 |= VRTX_TEX_SET_FMT(count, SZ_TO_HW(sz));
162 mcsb1 |= TEXBIND_SET(i, count + 8);
163
164 if (mcs != i830->state.Tex[i][I830_TEXREG_MCS]) {
165 I830_STATECHANGE(i830, I830_UPLOAD_TEX(i));
166 i830->state.Tex[i][I830_TEXREG_MCS] = mcs;
167 }
168
169 count++;
170 }
171 }
172
173 v0 |= VFT0_TEX_COUNT(count);
174 }
175
176 /* Only need to change the vertex emit code if there has been a
177 * statechange to a new hardware vertex format:
178 */
179 if (v0 != i830->state.Ctx[I830_CTXREG_VF] ||
180 v2 != i830->state.Ctx[I830_CTXREG_VF2] ||
181 mcsb1 != i830->state.Ctx[I830_CTXREG_MCSB1] ||
182 !RENDERINPUTS_EQUAL(index_bitset, i830->last_index_bitset)) {
183 int k;
184
185 I830_STATECHANGE(i830, I830_UPLOAD_CTX);
186
187 /* Must do this *after* statechange, so as not to affect
188 * buffered vertices reliant on the old state:
189 */
190 intel->vertex_size =
191 _tnl_install_attrs(ctx,
192 intel->vertex_attrs,
193 intel->vertex_attr_count,
194 intel->ViewportMatrix.m, 0);
195
196 intel->vertex_size >>= 2;
197
198 i830->state.Ctx[I830_CTXREG_VF] = v0;
199 i830->state.Ctx[I830_CTXREG_VF2] = v2;
200 i830->state.Ctx[I830_CTXREG_MCSB1] = mcsb1;
201 RENDERINPUTS_COPY(i830->last_index_bitset, index_bitset);
202
203 k = i830_check_vertex_size(intel, intel->vertex_size);
204 assert(k);
205 }
206 }
207
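/* Polygon stipple only applies while triangles are being rasterized, so
 * ST1_ENABLE is toggled as the reduced primitive changes, firing any
 * buffered vertices first when the stipple state actually flips.
 */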
208 static void
209 i830_reduced_primitive_state(struct intel_context *intel, GLenum rprim)
210 {
211 struct i830_context *i830 = i830_context(&intel->ctx);
212 GLuint st1 = i830->state.Stipple[I830_STPREG_ST1];
213
214 st1 &= ~ST1_ENABLE;
215
216 switch (rprim) {
217 case GL_TRIANGLES:
218 if (intel->ctx.Polygon.StippleFlag && intel->hw_stipple)
219 st1 |= ST1_ENABLE;
220 break;
221 case GL_LINES:
222 case GL_POINTS:
223 default:
224 break;
225 }
226
227 i830->intel.reduced_primitive = rprim;
228
229 if (st1 != i830->state.Stipple[I830_STPREG_ST1]) {
230 INTEL_FIREVERTICES(intel);
231
232 I830_STATECHANGE(i830, I830_UPLOAD_STIPPLE);
233 i830->state.Stipple[I830_STPREG_ST1] = st1;
234 }
235 }
236
237 /* Pull apart the vertex format registers and figure out how large a
238 * vertex is supposed to be.
239 */
240 static bool
241 i830_check_vertex_size(struct intel_context *intel, GLuint expected)
242 {
243 struct i830_context *i830 = i830_context(&intel->ctx);
244 int vft0 = i830->state.Ctx[I830_CTXREG_VF];
245 int vft1 = i830->state.Ctx[I830_CTXREG_VF2];
246 int nrtex = (vft0 & VFT0_TEX_COUNT_MASK) >> VFT0_TEX_COUNT_SHIFT;
247 int i, sz = 0;
248
249 switch (vft0 & VFT0_XYZW_MASK) {
250 case VFT0_XY:
251 sz = 2;
252 break;
253 case VFT0_XYZ:
254 sz = 3;
255 break;
256 case VFT0_XYW:
257 sz = 3;
258 break;
259 case VFT0_XYZW:
260 sz = 4;
261 break;
262 default:
263 fprintf(stderr, "no xyzw specified\n");
264 return 0;
265 }
266
267 if (vft0 & VFT0_SPEC)
268 sz++;
269 if (vft0 & VFT0_DIFFUSE)
270 sz++;
271 if (vft0 & VFT0_DEPTH_OFFSET)
272 sz++;
273 if (vft0 & VFT0_POINT_WIDTH)
274 sz++;
275
276 for (i = 0; i < nrtex; i++) {
277 switch (vft1 & VFT1_TEX0_MASK) {
278 case TEXCOORDFMT_2D:
279 sz += 2;
280 break;
281 case TEXCOORDFMT_3D:
282 sz += 3;
283 break;
284 case TEXCOORDFMT_4D:
285 sz += 4;
286 break;
287 case TEXCOORDFMT_1D:
288 sz += 1;
289 break;
290 }
291 vft1 >>= VFT1_TEX1_SHIFT;
292 }
293
294 if (sz != expected)
295 fprintf(stderr, "vertex size mismatch %d/%d\n", sz, expected);
296
297 return sz == expected;
298 }
299
300 static void
301 i830_emit_invarient_state(struct intel_context *intel)
302 {
303 BATCH_LOCALS;
304
305 BEGIN_BATCH(29);
306
307 OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
308 OUT_BATCH(0);
309
310 OUT_BATCH(_3DSTATE_DFLT_SPEC_CMD);
311 OUT_BATCH(0);
312
313 OUT_BATCH(_3DSTATE_DFLT_Z_CMD);
314 OUT_BATCH(0);
315
316 OUT_BATCH(_3DSTATE_FOG_MODE_CMD);
317 OUT_BATCH(FOGFUNC_ENABLE |
318 FOG_LINEAR_CONST | FOGSRC_INDEX_Z | ENABLE_FOG_DENSITY);
319 OUT_BATCH(0);
320 OUT_BATCH(0);
321
322
323 OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
324 MAP_UNIT(0) |
325 DISABLE_TEX_STREAM_BUMP |
326 ENABLE_TEX_STREAM_COORD_SET |
327 TEX_STREAM_COORD_SET(0) |
328 ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(0));
329 OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
330 MAP_UNIT(1) |
331 DISABLE_TEX_STREAM_BUMP |
332 ENABLE_TEX_STREAM_COORD_SET |
333 TEX_STREAM_COORD_SET(1) |
334 ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(1));
335 OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
336 MAP_UNIT(2) |
337 DISABLE_TEX_STREAM_BUMP |
338 ENABLE_TEX_STREAM_COORD_SET |
339 TEX_STREAM_COORD_SET(2) |
340 ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(2));
341 OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
342 MAP_UNIT(3) |
343 DISABLE_TEX_STREAM_BUMP |
344 ENABLE_TEX_STREAM_COORD_SET |
345 TEX_STREAM_COORD_SET(3) |
346 ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(3));
347
348 OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
349 OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(0));
350 OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
351 OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(1));
352 OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
353 OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(2));
354 OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
355 OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(3));
356
357 OUT_BATCH(_3DSTATE_VERTEX_TRANSFORM);
358 OUT_BATCH(DISABLE_VIEWPORT_TRANSFORM | DISABLE_PERSPECTIVE_DIVIDE);
359
360 OUT_BATCH(_3DSTATE_W_STATE_CMD);
361 OUT_BATCH(MAGIC_W_STATE_DWORD1);
362 OUT_BATCH(0x3f800000 /* 1.0 in IEEE float */ );
363
364
365 OUT_BATCH(_3DSTATE_COLOR_FACTOR_CMD);
366 OUT_BATCH(0x80808080); /* .5 required in alpha for GL_DOT3_RGBA_EXT */
367
368 ADVANCE_BATCH();
369 }
370
371
372 #define emit( intel, state, size ) \
373 intel_batchbuffer_data(intel, state, size, false)
374
375 static GLuint
376 get_dirty(struct i830_hw_state *state)
377 {
378 return state->active & ~state->emitted;
379 }
380
381 static GLuint
382 get_state_size(struct i830_hw_state *state)
383 {
384 GLuint dirty = get_dirty(state);
385 GLuint sz = 0;
386 GLuint i;
387
388 if (dirty & I830_UPLOAD_INVARIENT)
389 sz += 40 * sizeof(int);
390
391 if (dirty & I830_UPLOAD_RASTER_RULES)
392 sz += sizeof(state->RasterRules);
393
394 if (dirty & I830_UPLOAD_CTX)
395 sz += sizeof(state->Ctx);
396
397 if (dirty & I830_UPLOAD_BUFFERS)
398 sz += sizeof(state->Buffer);
399
400 if (dirty & I830_UPLOAD_STIPPLE)
401 sz += sizeof(state->Stipple);
402
403 for (i = 0; i < I830_TEX_UNITS; i++) {
404 if ((dirty & I830_UPLOAD_TEX(i)))
405 sz += sizeof(state->Tex[i]);
406
407 if (dirty & I830_UPLOAD_TEXBLEND(i))
408 sz += state->TexBlendWordsUsed[i] * 4;
409 }
410
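/* This is a conservative upper bound (e.g. the invariant state reserves
 * 40 dwords but BEGIN_BATCH(29) is all it emits); it only has to be large
 * enough for the require_space() check in i830_emit_state() to rule out a
 * batch wrap in the middle of a state emit.
 */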
411 return sz;
412 }
413
414
415 /* Emit all dirty state (I830_UPLOAD_*) into the batchbuffer.
416 */
417 static void
418 i830_emit_state(struct intel_context *intel)
419 {
420 struct i830_context *i830 = i830_context(&intel->ctx);
421 struct i830_hw_state *state = &i830->state;
422 int i, count;
423 GLuint dirty;
424 drm_intel_bo *aper_array[3 + I830_TEX_UNITS];
425 int aper_count;
426 GET_CURRENT_CONTEXT(ctx);
427 BATCH_LOCALS;
428
429 /* We don't hold the lock at this point, so want to make sure that
430 * there won't be a buffer wrap between the state emits and the primitive
431 * emit header.
432 *
433 * It might be better to talk about explicit places where
434 * scheduling is allowed, rather than assume that it is whenever a
435 * batchbuffer fills up.
436 */
437 intel_batchbuffer_require_space(intel,
438 get_state_size(state) + INTEL_PRIM_EMIT_SIZE,
439 false);
440 count = 0;
441 again:
442 aper_count = 0;
443 dirty = get_dirty(state);
444
445 aper_array[aper_count++] = intel->batch.bo;
446 if (dirty & I830_UPLOAD_BUFFERS) {
447 aper_array[aper_count++] = state->draw_region->bo;
448 if (state->depth_region)
449 aper_array[aper_count++] = state->depth_region->bo;
450 }
451
452 for (i = 0; i < I830_TEX_UNITS; i++)
453 if (dirty & I830_UPLOAD_TEX(i)) {
454 if (state->tex_buffer[i]) {
455 aper_array[aper_count++] = state->tex_buffer[i];
456 }
457 }
458
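/* If the batch plus every buffer object the dirty state references will not
 * fit in the aperture together, flush once and retry with a fresh batch;
 * failing again is treated as fatal (GL_OUT_OF_MEMORY and an assert).
 */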
459 if (dri_bufmgr_check_aperture_space(aper_array, aper_count)) {
460 if (count == 0) {
461 count++;
462 intel_batchbuffer_flush(intel);
463 goto again;
464 } else {
465 _mesa_error(ctx, GL_OUT_OF_MEMORY, "i830 emit state");
466 assert(0);
467 }
468 }
469
470
471 /* Do this here as we may have flushed the batchbuffer above,
472 * causing more state to be dirty!
473 */
474 dirty = get_dirty(state);
475 state->emitted |= dirty;
476 assert(get_dirty(state) == 0);
477
478 if (dirty & I830_UPLOAD_INVARIENT) {
479 DBG("I830_UPLOAD_INVARIENT:\n");
480 i830_emit_invarient_state(intel);
481 }
482
483 if (dirty & I830_UPLOAD_RASTER_RULES) {
484 DBG("I830_UPLOAD_RASTER_RULES:\n");
485 emit(intel, state->RasterRules, sizeof(state->RasterRules));
486 }
487
488 if (dirty & I830_UPLOAD_CTX) {
489 DBG("I830_UPLOAD_CTX:\n");
490 emit(intel, state->Ctx, sizeof(state->Ctx));
491
492 }
493
494 if (dirty & I830_UPLOAD_BUFFERS) {
495 GLuint len = 15;
496 
497 DBG("I830_UPLOAD_BUFFERS:\n");
498 
499 if (state->depth_region)
500 len += 3;
501 
502 BEGIN_BATCH(len);
503 OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR0]);
504 OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR1]);
505 OUT_RELOC(state->draw_region->bo,
506 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
507
508 if (state->depth_region) {
509 OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR0]);
510 OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR1]);
511 OUT_RELOC(state->depth_region->bo,
512 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
513 }
514
515 OUT_BATCH(state->Buffer[I830_DESTREG_DV0]);
516 OUT_BATCH(state->Buffer[I830_DESTREG_DV1]);
517 OUT_BATCH(state->Buffer[I830_DESTREG_SENABLE]);
518 OUT_BATCH(state->Buffer[I830_DESTREG_SR0]);
519 OUT_BATCH(state->Buffer[I830_DESTREG_SR1]);
520 OUT_BATCH(state->Buffer[I830_DESTREG_SR2]);
521
522 assert(state->Buffer[I830_DESTREG_DRAWRECT0] != MI_NOOP);
523 OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT0]);
524 OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT1]);
525 OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT2]);
526 OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT3]);
527 OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT4]);
528 OUT_BATCH(state->Buffer[I830_DESTREG_DRAWRECT5]);
529 ADVANCE_BATCH();
530 }
531
532 if (dirty & I830_UPLOAD_STIPPLE) {
533 DBG("I830_UPLOAD_STIPPLE:\n");
534 emit(intel, state->Stipple, sizeof(state->Stipple));
535 }
536
537 for (i = 0; i < I830_TEX_UNITS; i++) {
538 if ((dirty & I830_UPLOAD_TEX(i))) {
539 DBG("I830_UPLOAD_TEX(%d):\n", i);
540
541 BEGIN_BATCH(I830_TEX_SETUP_SIZE + 1);
542 OUT_BATCH(state->Tex[i][I830_TEXREG_TM0LI]);
543
544 OUT_RELOC(state->tex_buffer[i],
545 I915_GEM_DOMAIN_SAMPLER, 0,
546 state->tex_offset[i]);
547
548 OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S1]);
549 OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S2]);
550 OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S3]);
551 OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S4]);
552 OUT_BATCH(state->Tex[i][I830_TEXREG_MCS]);
553 OUT_BATCH(state->Tex[i][I830_TEXREG_CUBE]);
554
555 ADVANCE_BATCH();
556 }
557
558 if (dirty & I830_UPLOAD_TEXBLEND(i)) {
559 DBG("I830_UPLOAD_TEXBLEND(%d): %d words\n", i,
560 state->TexBlendWordsUsed[i]);
561 emit(intel, state->TexBlend[i], state->TexBlendWordsUsed[i] * 4);
562 }
563 }
564
565 assert(get_dirty(state) == 0);
566 }
567
568 static void
569 i830_destroy_context(struct intel_context *intel)
570 {
571 GLuint i;
572 struct i830_context *i830 = i830_context(&intel->ctx);
573
574 intel_region_release(&i830->state.draw_region);
575 intel_region_release(&i830->state.depth_region);
576
577 for (i = 0; i < I830_TEX_UNITS; i++) {
578 if (i830->state.tex_buffer[i] != NULL) {
579 drm_intel_bo_unreference(i830->state.tex_buffer[i]);
580 i830->state.tex_buffer[i] = NULL;
581 }
582 }
583
584 _tnl_free_vertices(&intel->ctx);
585 }
586
587 static uint32_t i830_render_target_format_for_mesa_format[MESA_FORMAT_COUNT] =
588 {
589 [MESA_FORMAT_ARGB8888] = DV_PF_8888,
590 [MESA_FORMAT_XRGB8888] = DV_PF_8888,
591 [MESA_FORMAT_RGB565] = DV_PF_565,
592 [MESA_FORMAT_ARGB1555] = DV_PF_1555,
593 [MESA_FORMAT_ARGB4444] = DV_PF_4444,
594 };
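/* gl_format values not listed above are zero-initialized, which
 * i830_render_target_supported() below reports as not renderable.
 */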
595
596 static bool
597 i830_render_target_supported(struct intel_context *intel, gl_format format)
598 {
599 if (format == MESA_FORMAT_S8_Z24 ||
600 format == MESA_FORMAT_X8_Z24 ||
601 format == MESA_FORMAT_Z16) {
602 return true;
603 }
604
605 return i830_render_target_format_for_mesa_format[format] != 0;
606 }
607
608 static void
609 i830_set_draw_region(struct intel_context *intel,
610 struct intel_region *color_regions[],
611 struct intel_region *depth_region,
612 GLuint num_regions)
613 {
614 struct i830_context *i830 = i830_context(&intel->ctx);
615 struct gl_context *ctx = &intel->ctx;
616 struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[0];
617 struct intel_renderbuffer *irb = intel_renderbuffer(rb);
618 struct gl_renderbuffer *drb;
619 struct intel_renderbuffer *idrb = NULL;
620 GLuint value;
621 struct i830_hw_state *state = &i830->state;
622 uint32_t draw_x, draw_y;
623
624 if (state->draw_region != color_regions[0]) {
625 intel_region_reference(&state->draw_region, color_regions[0]);
626 }
627 if (state->depth_region != depth_region) {
628 intel_region_reference(&state->depth_region, depth_region);
629 }
630
631 /*
632 * Set stride/cpp values
633 */
634 i915_set_buf_info_for_region(&state->Buffer[I830_DESTREG_CBUFADDR0],
635 color_regions[0], BUF_3D_ID_COLOR_BACK);
636
637 i915_set_buf_info_for_region(&state->Buffer[I830_DESTREG_DBUFADDR0],
638 depth_region, BUF_3D_ID_DEPTH);
639
640 /*
641 * Compute/set I830_DESTREG_DV1 value
642 */
643 value = (DSTORG_HORT_BIAS(0x8) | /* .5 */
644 DSTORG_VERT_BIAS(0x8) | DEPTH_IS_Z); /* .5 */
645
646 if (irb != NULL) {
647 value |= i830_render_target_format_for_mesa_format[irb->Base.Format];
648 }
649
650 if (depth_region && depth_region->cpp == 4) {
651 value |= DEPTH_FRMT_24_FIXED_8_OTHER;
652 }
653 else {
654 value |= DEPTH_FRMT_16_FIXED;
655 }
656 state->Buffer[I830_DESTREG_DV1] = value;
657
658 drb = ctx->DrawBuffer->Attachment[BUFFER_DEPTH].Renderbuffer;
659 if (!drb)
660 drb = ctx->DrawBuffer->Attachment[BUFFER_STENCIL].Renderbuffer;
661
662 if (drb)
663 idrb = intel_renderbuffer(drb);
664
665 /* We set up the drawing rectangle to be offset into the color
666 * region's location in the miptree. If it doesn't match with
667 * depth's offsets, we can't render to it.
668 *
669 * (Well, not actually true -- the hw grew a bit to let depth's
670 * offset get forced to 0,0. We may want to use that if people are
671 * hitting that case. Also, some configurations may be supportable
672 * by tweaking the start offset of the buffers around, which we
673 * can't do in general due to tiling)
674 */
675 FALLBACK(intel, I830_FALLBACK_DRAW_OFFSET,
676 idrb && irb && (idrb->draw_x != irb->draw_x ||
677 idrb->draw_y != irb->draw_y));
678
679 if (irb) {
680 draw_x = irb->draw_x;
681 draw_y = irb->draw_y;
682 } else if (idrb) {
683 draw_x = idrb->draw_x;
684 draw_y = idrb->draw_y;
685 } else {
686 draw_x = 0;
687 draw_y = 0;
688 }
689
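/* Drawing rectangle: DRAWRECT2 holds the top-left corner and DRAWRECT3 the
 * bottom-right corner of the clip rectangle, DRAWRECT4 the drawing origin;
 * all are offset by the renderbuffer's (draw_x, draw_y) position within its
 * miptree so rendering lands on the intended slice.
 */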
690 state->Buffer[I830_DESTREG_DRAWRECT0] = _3DSTATE_DRAWRECT_INFO;
691 state->Buffer[I830_DESTREG_DRAWRECT1] = 0;
692 state->Buffer[I830_DESTREG_DRAWRECT2] = (draw_y << 16) | draw_x;
693 state->Buffer[I830_DESTREG_DRAWRECT3] =
694 ((ctx->DrawBuffer->Width + draw_x) & 0xffff) |
695 ((ctx->DrawBuffer->Height + draw_y) << 16);
696 state->Buffer[I830_DESTREG_DRAWRECT4] = (draw_y << 16) | draw_x;
697 state->Buffer[I830_DESTREG_DRAWRECT5] = MI_NOOP;
698
699 I830_STATECHANGE(i830, I830_UPLOAD_BUFFERS);
700 }
701
702 /**
703 * Update the hardware state for drawing into a window or framebuffer object.
704 *
705 * Called by glDrawBuffer, glBindFramebufferEXT, MakeCurrent, and other
706 * places within the driver.
707 *
708 * Basically, this needs to be called any time the current framebuffer
709 * changes, the renderbuffers change, or we need to draw into different
710 * color buffers.
711 */
712 static void
713 i830_update_draw_buffer(struct intel_context *intel)
714 {
715 struct gl_context *ctx = &intel->ctx;
716 struct gl_framebuffer *fb = ctx->DrawBuffer;
717 struct intel_region *colorRegions[MAX_DRAW_BUFFERS], *depthRegion = NULL;
718 struct intel_renderbuffer *irbDepth = NULL, *irbStencil = NULL;
719
720 if (!fb) {
721 /* this can happen during the initial context initialization */
722 return;
723 }
724
725 irbDepth = intel_get_renderbuffer(fb, BUFFER_DEPTH);
726 irbStencil = intel_get_renderbuffer(fb, BUFFER_STENCIL);
727
728 /* Do this here, not core Mesa, since this function is called from
729 * many places within the driver.
730 */
731 if (ctx->NewState & _NEW_BUFFERS) {
732 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
733 _mesa_update_framebuffer(ctx);
734 /* this updates the DrawBuffer's Width/Height if it's a FBO */
735 _mesa_update_draw_buffer_bounds(ctx);
736 }
737
738 if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
739 /* this may occur when we're called by glBindFrameBuffer() during
740 * the process of someone setting up renderbuffers, etc.
741 */
742 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
743 return;
744 }
745
746 /* How many color buffers are we drawing into?
747 *
748 * If there are zero buffers or the buffer is too big, don't configure any
749 * regions for hardware drawing. We'll fallback to software below. Not
750 * having regions set makes some of the software fallback paths faster.
751 */
752 if ((fb->Width > ctx->Const.MaxRenderbufferSize)
753 || (fb->Height > ctx->Const.MaxRenderbufferSize)
754 || (fb->_NumColorDrawBuffers == 0)) {
755 /* writing to 0 */
756 colorRegions[0] = NULL;
757 }
758 else if (fb->_NumColorDrawBuffers > 1) {
759 int i;
760 struct intel_renderbuffer *irb;
761
762 for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
763 irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);
764 colorRegions[i] = (irb && irb->mt) ? irb->mt->region : NULL;
765 }
766 }
767 else {
768 /* Get the intel_renderbuffer for the single colorbuffer we're drawing
769 * into.
770 */
771 if (fb->Name == 0) {
772 /* drawing to window system buffer */
773 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT)
774 colorRegions[0] = intel_get_rb_region(fb, BUFFER_FRONT_LEFT);
775 else
776 colorRegions[0] = intel_get_rb_region(fb, BUFFER_BACK_LEFT);
777 }
778 else {
779 /* drawing to user-created FBO */
780 struct intel_renderbuffer *irb;
781 irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
782 colorRegions[0] = (irb && irb->mt) ? irb->mt->region : NULL;
783 }
784 }
785
786 if (!colorRegions[0]) {
787 FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, true);
788 }
789 else {
790 FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, false);
791 }
792
793 /* Check for depth fallback. */
794 if (irbDepth && irbDepth->mt) {
795 FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
796 depthRegion = irbDepth->mt->region;
797 } else if (irbDepth && !irbDepth->mt) {
798 FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, true);
799 depthRegion = NULL;
800 } else { /* !irbDepth */
801 /* No fallback is needed because there is no depth buffer. */
802 FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, false);
803 depthRegion = NULL;
804 }
805
806 /* Check for stencil fallback. */
807 if (irbStencil && irbStencil->mt) {
808 assert(irbStencil->Base.Format == MESA_FORMAT_S8_Z24);
809 FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
810 } else if (irbStencil && !irbStencil->mt) {
811 FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, true);
812 } else { /* !irbStencil */
813 /* No fallback is needed because there is no stencil buffer. */
814 FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, false);
815 }
816
817 /* If we have a (packed) stencil buffer attached but no depth buffer,
818 * we still need to set up the shared depth/stencil state so we can use it.
819 */
820 if (depthRegion == NULL && irbStencil && irbStencil->mt
821 && irbStencil->Base.Format == MESA_FORMAT_S8_Z24) {
822 depthRegion = irbStencil->mt->region;
823 }
824
825 /*
826 * Update depth and stencil test state
827 */
828 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
829 (ctx->Depth.Test && fb->Visual.depthBits > 0));
830 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
831 (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
832
833 intel->vtbl.set_draw_region(intel, colorRegions, depthRegion,
834 fb->_NumColorDrawBuffers);
835 intel->NewGLState |= _NEW_BUFFERS;
836
837 /* update viewport since it depends on window size */
838 intelCalcViewport(ctx);
839
840 /* Set state we know depends on drawable parameters:
841 */
842 ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
843 ctx->Scissor.Width, ctx->Scissor.Height);
844
845 ctx->Driver.DepthRange(ctx, ctx->Viewport.Near, ctx->Viewport.Far);
846
847 /* Update culling direction which changes depending on the
848 * orientation of the buffer:
849 */
850 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
851 }
852
853 /* Start-of-batch hook: just reset the emitted mask so every active
854 * I830_UPLOAD_* group is re-emitted into the new batchbuffer. */
855 static void
856 i830_new_batch(struct intel_context *intel)
857 {
858 struct i830_context *i830 = i830_context(&intel->ctx);
859 i830->state.emitted = 0;
860 }
861
862 static void
863 i830_assert_not_dirty( struct intel_context *intel )
864 {
865 struct i830_context *i830 = i830_context(&intel->ctx);
866 assert(!get_dirty(&i830->state));
867 (void) i830;
868 }
869
870 static void
871 i830_invalidate_state(struct intel_context *intel, GLuint new_state)
872 {
873 struct gl_context *ctx = &intel->ctx;
874
875 _swsetup_InvalidateState(ctx, new_state);
876 _tnl_InvalidateState(ctx, new_state);
877 _tnl_invalidate_vertex_state(ctx, new_state);
878
879 if (new_state & _NEW_LIGHT)
880 i830_update_provoking_vertex(&intel->ctx);
881 }
882
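/* Gen2 hardware has no hierarchical depth buffer, so no format qualifies;
 * this hook exists because the shared intel code queries it since the HiZ
 * merge referenced in the commit message above.
 */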
883 static bool
884 i830_is_hiz_depth_format(struct intel_context *intel, gl_format format)
885 {
886 return false;
887 }
888
889 void
890 i830InitVtbl(struct i830_context *i830)
891 {
892 i830->intel.vtbl.check_vertex_size = i830_check_vertex_size;
893 i830->intel.vtbl.destroy = i830_destroy_context;
894 i830->intel.vtbl.emit_state = i830_emit_state;
895 i830->intel.vtbl.new_batch = i830_new_batch;
896 i830->intel.vtbl.reduced_primitive_state = i830_reduced_primitive_state;
897 i830->intel.vtbl.set_draw_region = i830_set_draw_region;
898 i830->intel.vtbl.update_draw_buffer = i830_update_draw_buffer;
899 i830->intel.vtbl.update_texture_state = i830UpdateTextureState;
900 i830->intel.vtbl.render_start = i830_render_start;
901 i830->intel.vtbl.render_prevalidate = i830_render_prevalidate;
902 i830->intel.vtbl.assert_not_dirty = i830_assert_not_dirty;
903 i830->intel.vtbl.finish_batch = intel_finish_vb;
904 i830->intel.vtbl.invalidate_state = i830_invalidate_state;
905 i830->intel.vtbl.render_target_supported = i830_render_target_supported;
906 i830->intel.vtbl.is_hiz_depth_format = i830_is_hiz_depth_format;
907 }