1 /**************************************************************************
3 * Copyright 2003 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "i830_context.h"
30 #include "intel_batchbuffer.h"
31 #include "intel_mipmap_tree.h"
32 #include "intel_regions.h"
33 #include "intel_tris.h"
34 #include "intel_fbo.h"
35 #include "intel_buffers.h"
37 #include "tnl/t_context.h"
38 #include "tnl/t_vertex.h"
39 #include "swrast_setup/swrast_setup.h"
40 #include "main/renderbuffer.h"
41 #include "main/framebuffer.h"
42 #include "main/fbobject.h"
44 #define FILE_DEBUG_FLAG DEBUG_STATE
46 static bool i830_check_vertex_size(struct intel_context
*intel
,
/* Convert a texcoord component count (2..4) to the 2-bit hardware encoding. */
#define SZ_TO_HW(sz)  ((sz-2)&0x3)
/* Map a component count to the matching t_vertex EMIT_*F enum. */
#define EMIT_SZ(sz)   (EMIT_1F + (sz) - 1)

/* Append one attribute to intel->vertex_attrs and accumulate its vertex
 * format bit into the local 'v0' (caller must have 'v0' in scope).
 */
#define EMIT_ATTR( ATTR, STYLE, V0 )					\
do {									\
   intel->vertex_attrs[intel->vertex_attr_count].attrib = (ATTR);	\
   intel->vertex_attrs[intel->vertex_attr_count].format = (STYLE);	\
   intel->vertex_attr_count++;						\
   v0 |= V0;								\
} while (0)

/* Append N bytes of padding to the vertex layout (no format bit). */
#define EMIT_PAD( N )							\
do {									\
   intel->vertex_attrs[intel->vertex_attr_count].attrib = 0;		\
   intel->vertex_attrs[intel->vertex_attr_count].format = EMIT_PAD;	\
   intel->vertex_attrs[intel->vertex_attr_count].offset = (N);		\
   intel->vertex_attr_count++;						\
} while (0)

/* Pack a per-unit texcoord format / texture binding into VF2 / MCSB1. */
#define VRTX_TEX_SET_FMT(n, x)  ((x)<<((n)*2))
#define TEXBIND_SET(n, x)       ((x)<<((n)*4))
/* i830 has no prevalidation work to do before rendering; the hook must
 * still exist because the shared intel vtbl calls it unconditionally.
 */
static void
i830_render_prevalidate(struct intel_context *intel)
{
}
77 i830_render_start(struct intel_context
*intel
)
79 struct gl_context
*ctx
= &intel
->ctx
;
80 struct i830_context
*i830
= i830_context(ctx
);
81 TNLcontext
*tnl
= TNL_CONTEXT(ctx
);
82 struct vertex_buffer
*VB
= &tnl
->vb
;
83 GLbitfield64 index_bitset
= tnl
->render_inputs_bitset
;
84 GLuint v0
= _3DSTATE_VFT0_CMD
;
85 GLuint v2
= _3DSTATE_VFT1_CMD
;
90 VB
->AttribPtr
[VERT_ATTRIB_POS
] = VB
->NdcPtr
;
91 intel
->vertex_attr_count
= 0;
93 /* EMIT_ATTR's must be in order as they tell t_vertex.c how to
94 * build up a hardware vertex.
96 if (index_bitset
& BITFIELD64_RANGE(_TNL_ATTRIB_TEX0
, _TNL_NUM_TEX
)) {
97 EMIT_ATTR(_TNL_ATTRIB_POS
, EMIT_4F_VIEWPORT
, VFT0_XYZW
);
98 intel
->coloroffset
= 4;
101 EMIT_ATTR(_TNL_ATTRIB_POS
, EMIT_3F_VIEWPORT
, VFT0_XYZ
);
102 intel
->coloroffset
= 3;
105 if (index_bitset
& BITFIELD64_BIT(_TNL_ATTRIB_POINTSIZE
)) {
106 EMIT_ATTR(_TNL_ATTRIB_POINTSIZE
, EMIT_1F
, VFT0_POINT_WIDTH
);
109 EMIT_ATTR(_TNL_ATTRIB_COLOR0
, EMIT_4UB_4F_BGRA
, VFT0_DIFFUSE
);
111 intel
->specoffset
= 0;
112 if (index_bitset
& (BITFIELD64_BIT(_TNL_ATTRIB_COLOR1
) |
113 BITFIELD64_BIT(_TNL_ATTRIB_FOG
))) {
114 if (index_bitset
& BITFIELD64_BIT(_TNL_ATTRIB_COLOR1
)) {
115 intel
->specoffset
= intel
->coloroffset
+ 1;
116 EMIT_ATTR(_TNL_ATTRIB_COLOR1
, EMIT_3UB_3F_BGR
, VFT0_SPEC
);
121 if (index_bitset
& BITFIELD64_BIT(_TNL_ATTRIB_FOG
))
122 EMIT_ATTR(_TNL_ATTRIB_FOG
, EMIT_1UB_1F
, VFT0_SPEC
);
127 if (index_bitset
& BITFIELD64_RANGE(_TNL_ATTRIB_TEX0
, _TNL_NUM_TEX
)) {
130 for (i
= 0; i
< I830_TEX_UNITS
; i
++) {
131 if (index_bitset
& BITFIELD64_BIT(_TNL_ATTRIB_TEX(i
))) {
132 GLuint sz
= VB
->AttribPtr
[_TNL_ATTRIB_TEX0
+ i
]->size
;
134 GLuint mcs
= (i830
->state
.Tex
[i
][I830_TEXREG_MCS
] &
137 if (intel
->ctx
.Texture
.Unit
[i
]._Current
->Target
== GL_TEXTURE_CUBE_MAP
) {
140 mcs
|= TEXCOORDTYPE_VECTOR
;
148 mcs
|= TEXCOORDTYPE_CARTESIAN
;
153 mcs
|= TEXCOORDTYPE_HOMOGENEOUS
;
160 EMIT_ATTR(_TNL_ATTRIB_TEX0
+ i
, emit
, 0);
161 v2
|= VRTX_TEX_SET_FMT(count
, SZ_TO_HW(sz
));
162 mcsb1
|= (count
+ 8) << (i
* 4);
164 if (mcs
!= i830
->state
.Tex
[i
][I830_TEXREG_MCS
]) {
165 I830_STATECHANGE(i830
, I830_UPLOAD_TEX(i
));
166 i830
->state
.Tex
[i
][I830_TEXREG_MCS
] = mcs
;
173 v0
|= VFT0_TEX_COUNT(count
);
176 /* Only need to change the vertex emit code if there has been a
177 * statechange to a new hardware vertex format:
179 if (v0
!= i830
->state
.Ctx
[I830_CTXREG_VF
] ||
180 v2
!= i830
->state
.Ctx
[I830_CTXREG_VF2
] ||
181 mcsb1
!= i830
->state
.Ctx
[I830_CTXREG_MCSB1
] ||
182 index_bitset
!= i830
->last_index_bitset
) {
183 I830_STATECHANGE(i830
, I830_UPLOAD_CTX
);
185 /* Must do this *after* statechange, so as not to affect
186 * buffered vertices reliant on the old state:
189 _tnl_install_attrs(ctx
,
191 intel
->vertex_attr_count
,
192 intel
->ViewportMatrix
.m
, 0);
194 intel
->vertex_size
>>= 2;
196 i830
->state
.Ctx
[I830_CTXREG_VF
] = v0
;
197 i830
->state
.Ctx
[I830_CTXREG_VF2
] = v2
;
198 i830
->state
.Ctx
[I830_CTXREG_MCSB1
] = mcsb1
;
199 i830
->last_index_bitset
= index_bitset
;
201 assert(i830_check_vertex_size(intel
, intel
->vertex_size
));
206 i830_reduced_primitive_state(struct intel_context
*intel
, GLenum rprim
)
208 struct i830_context
*i830
= i830_context(&intel
->ctx
);
209 GLuint st1
= i830
->state
.Stipple
[I830_STPREG_ST1
];
215 if (intel
->ctx
.Polygon
.StippleFlag
&& intel
->hw_stipple
)
224 i830
->intel
.reduced_primitive
= rprim
;
226 if (st1
!= i830
->state
.Stipple
[I830_STPREG_ST1
]) {
227 INTEL_FIREVERTICES(intel
);
229 I830_STATECHANGE(i830
, I830_UPLOAD_STIPPLE
);
230 i830
->state
.Stipple
[I830_STPREG_ST1
] = st1
;
234 /* Pull apart the vertex format registers and figure out how large a
235 * vertex is supposed to be.
238 i830_check_vertex_size(struct intel_context
*intel
, GLuint expected
)
240 struct i830_context
*i830
= i830_context(&intel
->ctx
);
241 int vft0
= i830
->state
.Ctx
[I830_CTXREG_VF
];
242 int vft1
= i830
->state
.Ctx
[I830_CTXREG_VF2
];
243 int nrtex
= (vft0
& VFT0_TEX_COUNT_MASK
) >> VFT0_TEX_COUNT_SHIFT
;
246 switch (vft0
& VFT0_XYZW_MASK
) {
260 fprintf(stderr
, "no xyzw specified\n");
264 if (vft0
& VFT0_SPEC
)
266 if (vft0
& VFT0_DIFFUSE
)
268 if (vft0
& VFT0_DEPTH_OFFSET
)
270 if (vft0
& VFT0_POINT_WIDTH
)
273 for (i
= 0; i
< nrtex
; i
++) {
274 switch (vft1
& VFT1_TEX0_MASK
) {
288 vft1
>>= VFT1_TEX1_SHIFT
;
292 fprintf(stderr
, "vertex size mismatch %d/%d\n", sz
, expected
);
294 return sz
== expected
;
298 i830_emit_invarient_state(struct intel_context
*intel
)
304 OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD
);
307 OUT_BATCH(_3DSTATE_DFLT_SPEC_CMD
);
310 OUT_BATCH(_3DSTATE_DFLT_Z_CMD
);
313 OUT_BATCH(_3DSTATE_FOG_MODE_CMD
);
314 OUT_BATCH(FOGFUNC_ENABLE
|
315 FOG_LINEAR_CONST
| FOGSRC_INDEX_Z
| ENABLE_FOG_DENSITY
);
320 OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD
|
322 DISABLE_TEX_STREAM_BUMP
|
323 ENABLE_TEX_STREAM_COORD_SET
|
324 TEX_STREAM_COORD_SET(0) |
325 ENABLE_TEX_STREAM_MAP_IDX
| TEX_STREAM_MAP_IDX(0));
326 OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD
|
328 DISABLE_TEX_STREAM_BUMP
|
329 ENABLE_TEX_STREAM_COORD_SET
|
330 TEX_STREAM_COORD_SET(1) |
331 ENABLE_TEX_STREAM_MAP_IDX
| TEX_STREAM_MAP_IDX(1));
332 OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD
|
334 DISABLE_TEX_STREAM_BUMP
|
335 ENABLE_TEX_STREAM_COORD_SET
|
336 TEX_STREAM_COORD_SET(2) |
337 ENABLE_TEX_STREAM_MAP_IDX
| TEX_STREAM_MAP_IDX(2));
338 OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD
|
340 DISABLE_TEX_STREAM_BUMP
|
341 ENABLE_TEX_STREAM_COORD_SET
|
342 TEX_STREAM_COORD_SET(3) |
343 ENABLE_TEX_STREAM_MAP_IDX
| TEX_STREAM_MAP_IDX(3));
345 OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM
);
346 OUT_BATCH(DISABLE_TEX_TRANSFORM
| TEXTURE_SET(0));
347 OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM
);
348 OUT_BATCH(DISABLE_TEX_TRANSFORM
| TEXTURE_SET(1));
349 OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM
);
350 OUT_BATCH(DISABLE_TEX_TRANSFORM
| TEXTURE_SET(2));
351 OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM
);
352 OUT_BATCH(DISABLE_TEX_TRANSFORM
| TEXTURE_SET(3));
354 OUT_BATCH(_3DSTATE_VERTEX_TRANSFORM
);
355 OUT_BATCH(DISABLE_VIEWPORT_TRANSFORM
| DISABLE_PERSPECTIVE_DIVIDE
);
357 OUT_BATCH(_3DSTATE_W_STATE_CMD
);
358 OUT_BATCH(MAGIC_W_STATE_DWORD1
);
359 OUT_BATCH(0x3f800000 /* 1.0 in IEEE float */ );
362 OUT_BATCH(_3DSTATE_COLOR_FACTOR_CMD
);
363 OUT_BATCH(0x80808080); /* .5 required in alpha for GL_DOT3_RGBA_EXT */
/* Copy a raw block of state dwords straight into the batchbuffer. */
#define emit( intel, state, size )			\
   intel_batchbuffer_data(intel, state, size)
373 get_dirty(struct i830_hw_state
*state
)
375 return state
->active
& ~state
->emitted
;
379 get_state_size(struct i830_hw_state
*state
)
381 GLuint dirty
= get_dirty(state
);
385 if (dirty
& I830_UPLOAD_INVARIENT
)
386 sz
+= 40 * sizeof(int);
388 if (dirty
& I830_UPLOAD_RASTER_RULES
)
389 sz
+= sizeof(state
->RasterRules
);
391 if (dirty
& I830_UPLOAD_CTX
)
392 sz
+= sizeof(state
->Ctx
);
394 if (dirty
& I830_UPLOAD_BUFFERS
)
395 sz
+= sizeof(state
->Buffer
);
397 if (dirty
& I830_UPLOAD_STIPPLE
)
398 sz
+= sizeof(state
->Stipple
);
400 for (i
= 0; i
< I830_TEX_UNITS
; i
++) {
401 if ((dirty
& I830_UPLOAD_TEX(i
)))
402 sz
+= sizeof(state
->Tex
[i
]);
404 if (dirty
& I830_UPLOAD_TEXBLEND(i
))
405 sz
+= state
->TexBlendWordsUsed
[i
] * 4;
412 /* Push the state into the sarea and/or texture memory.
415 i830_emit_state(struct intel_context
*intel
)
417 struct i830_context
*i830
= i830_context(&intel
->ctx
);
418 struct i830_hw_state
*state
= &i830
->state
;
421 drm_intel_bo
*aper_array
[3 + I830_TEX_UNITS
];
423 GET_CURRENT_CONTEXT(ctx
);
426 /* We don't hold the lock at this point, so want to make sure that
427 * there won't be a buffer wrap between the state emits and the primitive
430 * It might be better to talk about explicit places where
431 * scheduling is allowed, rather than assume that it is whenever a
432 * batchbuffer fills up.
434 intel_batchbuffer_require_space(intel
,
435 get_state_size(state
) +
436 INTEL_PRIM_EMIT_SIZE
);
440 dirty
= get_dirty(state
);
442 aper_array
[aper_count
++] = intel
->batch
.bo
;
443 if (dirty
& I830_UPLOAD_BUFFERS
) {
444 aper_array
[aper_count
++] = state
->draw_region
->bo
;
445 if (state
->depth_region
)
446 aper_array
[aper_count
++] = state
->depth_region
->bo
;
449 for (i
= 0; i
< I830_TEX_UNITS
; i
++)
450 if (dirty
& I830_UPLOAD_TEX(i
)) {
451 if (state
->tex_buffer
[i
]) {
452 aper_array
[aper_count
++] = state
->tex_buffer
[i
];
456 if (dri_bufmgr_check_aperture_space(aper_array
, aper_count
)) {
459 intel_batchbuffer_flush(intel
);
462 _mesa_error(ctx
, GL_OUT_OF_MEMORY
, "i830 emit state");
468 /* Do this here as we may have flushed the batchbuffer above,
469 * causing more state to be dirty!
471 dirty
= get_dirty(state
);
472 state
->emitted
|= dirty
;
473 assert(get_dirty(state
) == 0);
475 if (dirty
& I830_UPLOAD_INVARIENT
) {
476 DBG("I830_UPLOAD_INVARIENT:\n");
477 i830_emit_invarient_state(intel
);
480 if (dirty
& I830_UPLOAD_RASTER_RULES
) {
481 DBG("I830_UPLOAD_RASTER_RULES:\n");
482 emit(intel
, state
->RasterRules
, sizeof(state
->RasterRules
));
485 if (dirty
& I830_UPLOAD_CTX
) {
486 DBG("I830_UPLOAD_CTX:\n");
487 emit(intel
, state
->Ctx
, sizeof(state
->Ctx
));
491 if (dirty
& I830_UPLOAD_BUFFERS
) {
494 DBG("I830_UPLOAD_BUFFERS:\n");
496 if (state
->depth_region
)
500 OUT_BATCH(state
->Buffer
[I830_DESTREG_CBUFADDR0
]);
501 OUT_BATCH(state
->Buffer
[I830_DESTREG_CBUFADDR1
]);
502 OUT_RELOC(state
->draw_region
->bo
,
503 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
, 0);
505 if (state
->depth_region
) {
506 OUT_BATCH(state
->Buffer
[I830_DESTREG_DBUFADDR0
]);
507 OUT_BATCH(state
->Buffer
[I830_DESTREG_DBUFADDR1
]);
508 OUT_RELOC(state
->depth_region
->bo
,
509 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
, 0);
512 OUT_BATCH(state
->Buffer
[I830_DESTREG_DV0
]);
513 OUT_BATCH(state
->Buffer
[I830_DESTREG_DV1
]);
514 OUT_BATCH(state
->Buffer
[I830_DESTREG_SR0
]);
515 OUT_BATCH(state
->Buffer
[I830_DESTREG_SR1
]);
516 OUT_BATCH(state
->Buffer
[I830_DESTREG_SR2
]);
517 OUT_BATCH(state
->Buffer
[I830_DESTREG_SENABLE
]);
519 assert(state
->Buffer
[I830_DESTREG_DRAWRECT0
] != MI_NOOP
);
520 OUT_BATCH(state
->Buffer
[I830_DESTREG_DRAWRECT0
]);
521 OUT_BATCH(state
->Buffer
[I830_DESTREG_DRAWRECT1
]);
522 OUT_BATCH(state
->Buffer
[I830_DESTREG_DRAWRECT2
]);
523 OUT_BATCH(state
->Buffer
[I830_DESTREG_DRAWRECT3
]);
524 OUT_BATCH(state
->Buffer
[I830_DESTREG_DRAWRECT4
]);
525 OUT_BATCH(state
->Buffer
[I830_DESTREG_DRAWRECT5
]);
529 if (dirty
& I830_UPLOAD_STIPPLE
) {
530 DBG("I830_UPLOAD_STIPPLE:\n");
531 emit(intel
, state
->Stipple
, sizeof(state
->Stipple
));
534 for (i
= 0; i
< I830_TEX_UNITS
; i
++) {
535 if ((dirty
& I830_UPLOAD_TEX(i
))) {
536 DBG("I830_UPLOAD_TEX(%d):\n", i
);
538 BEGIN_BATCH(I830_TEX_SETUP_SIZE
+ 1);
539 OUT_BATCH(state
->Tex
[i
][I830_TEXREG_TM0LI
]);
541 OUT_RELOC(state
->tex_buffer
[i
],
542 I915_GEM_DOMAIN_SAMPLER
, 0,
543 state
->tex_offset
[i
]);
545 OUT_BATCH(state
->Tex
[i
][I830_TEXREG_TM0S1
]);
546 OUT_BATCH(state
->Tex
[i
][I830_TEXREG_TM0S2
]);
547 OUT_BATCH(state
->Tex
[i
][I830_TEXREG_TM0S3
]);
548 OUT_BATCH(state
->Tex
[i
][I830_TEXREG_TM0S4
]);
549 OUT_BATCH(state
->Tex
[i
][I830_TEXREG_MCS
]);
550 OUT_BATCH(state
->Tex
[i
][I830_TEXREG_CUBE
]);
555 if (dirty
& I830_UPLOAD_TEXBLEND(i
)) {
556 DBG("I830_UPLOAD_TEXBLEND(%d): %d words\n", i
,
557 state
->TexBlendWordsUsed
[i
]);
558 emit(intel
, state
->TexBlend
[i
], state
->TexBlendWordsUsed
[i
] * 4);
562 assert(get_dirty(state
) == 0);
566 i830_destroy_context(struct intel_context
*intel
)
569 struct i830_context
*i830
= i830_context(&intel
->ctx
);
571 intel_region_release(&i830
->state
.draw_region
);
572 intel_region_release(&i830
->state
.depth_region
);
574 for (i
= 0; i
< I830_TEX_UNITS
; i
++) {
575 if (i830
->state
.tex_buffer
[i
] != NULL
) {
576 drm_intel_bo_unreference(i830
->state
.tex_buffer
[i
]);
577 i830
->state
.tex_buffer
[i
] = NULL
;
581 _tnl_free_vertices(&intel
->ctx
);
584 static uint32_t i830_render_target_format_for_mesa_format
[MESA_FORMAT_COUNT
] =
586 [MESA_FORMAT_B8G8R8A8_UNORM
] = DV_PF_8888
,
587 [MESA_FORMAT_B8G8R8X8_UNORM
] = DV_PF_8888
,
588 [MESA_FORMAT_B5G6R5_UNORM
] = DV_PF_565
,
589 [MESA_FORMAT_B5G5R5A1_UNORM
] = DV_PF_1555
,
590 [MESA_FORMAT_B4G4R4A4_UNORM
] = DV_PF_4444
,
594 i830_render_target_supported(struct intel_context
*intel
,
595 struct gl_renderbuffer
*rb
)
597 mesa_format format
= rb
->Format
;
599 if (format
== MESA_FORMAT_Z24_UNORM_S8_UINT
||
600 format
== MESA_FORMAT_Z24_UNORM_X8_UINT
||
601 format
== MESA_FORMAT_Z_UNORM16
) {
605 return i830_render_target_format_for_mesa_format
[format
] != 0;
609 i830_set_draw_region(struct intel_context
*intel
,
610 struct intel_region
*color_regions
[],
611 struct intel_region
*depth_region
,
614 struct i830_context
*i830
= i830_context(&intel
->ctx
);
615 struct gl_context
*ctx
= &intel
->ctx
;
616 struct gl_renderbuffer
*rb
= ctx
->DrawBuffer
->_ColorDrawBuffers
[0];
617 struct intel_renderbuffer
*irb
= intel_renderbuffer(rb
);
618 struct gl_renderbuffer
*drb
;
619 struct intel_renderbuffer
*idrb
= NULL
;
621 struct i830_hw_state
*state
= &i830
->state
;
622 uint32_t draw_x
, draw_y
;
624 if (state
->draw_region
!= color_regions
[0]) {
625 intel_region_reference(&state
->draw_region
, color_regions
[0]);
627 if (state
->depth_region
!= depth_region
) {
628 intel_region_reference(&state
->depth_region
, depth_region
);
632 * Set stride/cpp values
634 i915_set_buf_info_for_region(&state
->Buffer
[I830_DESTREG_CBUFADDR0
],
635 color_regions
[0], BUF_3D_ID_COLOR_BACK
);
637 i915_set_buf_info_for_region(&state
->Buffer
[I830_DESTREG_DBUFADDR0
],
638 depth_region
, BUF_3D_ID_DEPTH
);
641 * Compute/set I830_DESTREG_DV1 value
643 value
= (DSTORG_HORT_BIAS(0x8) | /* .5 */
644 DSTORG_VERT_BIAS(0x8) | DEPTH_IS_Z
); /* .5 */
647 value
|= i830_render_target_format_for_mesa_format
[intel_rb_format(irb
)];
650 if (depth_region
&& depth_region
->cpp
== 4) {
651 value
|= DEPTH_FRMT_24_FIXED_8_OTHER
;
654 value
|= DEPTH_FRMT_16_FIXED
;
656 state
->Buffer
[I830_DESTREG_DV1
] = value
;
658 drb
= ctx
->DrawBuffer
->Attachment
[BUFFER_DEPTH
].Renderbuffer
;
660 drb
= ctx
->DrawBuffer
->Attachment
[BUFFER_STENCIL
].Renderbuffer
;
663 idrb
= intel_renderbuffer(drb
);
665 /* We set up the drawing rectangle to be offset into the color
666 * region's location in the miptree. If it doesn't match with
667 * depth's offsets, we can't render to it.
669 * (Well, not actually true -- the hw grew a bit to let depth's
670 * offset get forced to 0,0. We may want to use that if people are
671 * hitting that case. Also, some configurations may be supportable
672 * by tweaking the start offset of the buffers around, which we
673 * can't do in general due to tiling)
675 FALLBACK(intel
, I830_FALLBACK_DRAW_OFFSET
,
676 idrb
&& irb
&& (idrb
->draw_x
!= irb
->draw_x
||
677 idrb
->draw_y
!= irb
->draw_y
));
680 draw_x
= irb
->draw_x
;
681 draw_y
= irb
->draw_y
;
683 draw_x
= idrb
->draw_x
;
684 draw_y
= idrb
->draw_y
;
690 state
->Buffer
[I830_DESTREG_DRAWRECT0
] = _3DSTATE_DRAWRECT_INFO
;
691 state
->Buffer
[I830_DESTREG_DRAWRECT1
] = 0;
692 state
->Buffer
[I830_DESTREG_DRAWRECT2
] = (draw_y
<< 16) | draw_x
;
693 state
->Buffer
[I830_DESTREG_DRAWRECT3
] =
694 ((ctx
->DrawBuffer
->Width
+ draw_x
- 1) & 0xffff) |
695 ((ctx
->DrawBuffer
->Height
+ draw_y
- 1) << 16);
696 state
->Buffer
[I830_DESTREG_DRAWRECT4
] = (draw_y
<< 16) | draw_x
;
697 state
->Buffer
[I830_DESTREG_DRAWRECT5
] = MI_NOOP
;
699 I830_STATECHANGE(i830
, I830_UPLOAD_BUFFERS
);
703 * Update the hardware state for drawing into a window or framebuffer object.
705 * Called by glDrawBuffer, glBindFramebufferEXT, MakeCurrent, and other
706 * places within the driver.
708 * Basically, this needs to be called any time the current framebuffer
709 * changes, the renderbuffers change, or we need to draw into different
713 i830_update_draw_buffer(struct intel_context
*intel
)
715 struct gl_context
*ctx
= &intel
->ctx
;
716 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
717 struct intel_region
*colorRegions
[MAX_DRAW_BUFFERS
], *depthRegion
= NULL
;
718 struct intel_renderbuffer
*irbDepth
= NULL
, *irbStencil
= NULL
;
721 /* this can happen during the initial context initialization */
725 irbDepth
= intel_get_renderbuffer(fb
, BUFFER_DEPTH
);
726 irbStencil
= intel_get_renderbuffer(fb
, BUFFER_STENCIL
);
728 /* Do this here, not core Mesa, since this function is called from
729 * many places within the driver.
731 if (ctx
->NewState
& _NEW_BUFFERS
) {
732 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
733 _mesa_update_framebuffer(ctx
, ctx
->ReadBuffer
, ctx
->DrawBuffer
);
734 /* this updates the DrawBuffer's Width/Height if it's a FBO */
735 _mesa_update_draw_buffer_bounds(ctx
, ctx
->DrawBuffer
);
738 if (fb
->_Status
!= GL_FRAMEBUFFER_COMPLETE_EXT
) {
739 /* this may occur when we're called by glBindFrameBuffer() during
740 * the process of someone setting up renderbuffers, etc.
742 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
746 /* How many color buffers are we drawing into?
748 * If there are zero buffers or the buffer is too big, don't configure any
749 * regions for hardware drawing. We'll fallback to software below. Not
750 * having regions set makes some of the software fallback paths faster.
752 if ((fb
->Width
> ctx
->Const
.MaxRenderbufferSize
)
753 || (fb
->Height
> ctx
->Const
.MaxRenderbufferSize
)
754 || (fb
->_NumColorDrawBuffers
== 0)) {
756 colorRegions
[0] = NULL
;
758 else if (fb
->_NumColorDrawBuffers
> 1) {
760 struct intel_renderbuffer
*irb
;
762 for (i
= 0; i
< fb
->_NumColorDrawBuffers
; i
++) {
763 irb
= intel_renderbuffer(fb
->_ColorDrawBuffers
[i
]);
764 colorRegions
[i
] = (irb
&& irb
->mt
) ? irb
->mt
->region
: NULL
;
768 /* Get the intel_renderbuffer for the single colorbuffer we're drawing
771 if (_mesa_is_winsys_fbo(fb
)) {
772 /* drawing to window system buffer */
773 if (fb
->_ColorDrawBufferIndexes
[0] == BUFFER_FRONT_LEFT
)
774 colorRegions
[0] = intel_get_rb_region(fb
, BUFFER_FRONT_LEFT
);
776 colorRegions
[0] = intel_get_rb_region(fb
, BUFFER_BACK_LEFT
);
779 /* drawing to user-created FBO */
780 struct intel_renderbuffer
*irb
;
781 irb
= intel_renderbuffer(fb
->_ColorDrawBuffers
[0]);
782 colorRegions
[0] = (irb
&& irb
->mt
->region
) ? irb
->mt
->region
: NULL
;
786 if (!colorRegions
[0]) {
787 FALLBACK(intel
, INTEL_FALLBACK_DRAW_BUFFER
, true);
790 FALLBACK(intel
, INTEL_FALLBACK_DRAW_BUFFER
, false);
793 /* Check for depth fallback. */
794 if (irbDepth
&& irbDepth
->mt
) {
795 FALLBACK(intel
, INTEL_FALLBACK_DEPTH_BUFFER
, false);
796 depthRegion
= irbDepth
->mt
->region
;
797 } else if (irbDepth
&& !irbDepth
->mt
) {
798 FALLBACK(intel
, INTEL_FALLBACK_DEPTH_BUFFER
, true);
800 } else { /* !irbDepth */
801 /* No fallback is needed because there is no depth buffer. */
802 FALLBACK(intel
, INTEL_FALLBACK_DEPTH_BUFFER
, false);
806 /* Check for stencil fallback. */
807 if (irbStencil
&& irbStencil
->mt
) {
808 assert(intel_rb_format(irbStencil
) == MESA_FORMAT_Z24_UNORM_S8_UINT
);
809 FALLBACK(intel
, INTEL_FALLBACK_STENCIL_BUFFER
, false);
810 } else if (irbStencil
&& !irbStencil
->mt
) {
811 FALLBACK(intel
, INTEL_FALLBACK_STENCIL_BUFFER
, true);
812 } else { /* !irbStencil */
813 /* No fallback is needed because there is no stencil buffer. */
814 FALLBACK(intel
, INTEL_FALLBACK_STENCIL_BUFFER
, false);
817 /* If we have a (packed) stencil buffer attached but no depth buffer,
818 * we still need to set up the shared depth/stencil state so we can use it.
820 if (depthRegion
== NULL
&& irbStencil
&& irbStencil
->mt
821 && intel_rb_format(irbStencil
) == MESA_FORMAT_Z24_UNORM_S8_UINT
) {
822 depthRegion
= irbStencil
->mt
->region
;
826 * Update depth and stencil test state
828 ctx
->Driver
.Enable(ctx
, GL_DEPTH_TEST
, ctx
->Depth
.Test
);
829 ctx
->Driver
.Enable(ctx
, GL_STENCIL_TEST
,
830 (ctx
->Stencil
.Enabled
&& fb
->Visual
.stencilBits
> 0));
832 intel
->vtbl
.set_draw_region(intel
, colorRegions
, depthRegion
,
833 fb
->_NumColorDrawBuffers
);
834 intel
->NewGLState
|= _NEW_BUFFERS
;
836 /* Set state we know depends on drawable parameters:
838 intelCalcViewport(ctx
);
839 ctx
->Driver
.Scissor(ctx
);
841 /* Update culling direction which changes depending on the
842 * orientation of the buffer:
844 ctx
->Driver
.FrontFace(ctx
, ctx
->Polygon
.FrontFace
);
847 /* This isn't really handled at the moment.
850 i830_new_batch(struct intel_context
*intel
)
852 struct i830_context
*i830
= i830_context(&intel
->ctx
);
853 i830
->state
.emitted
= 0;
857 i830_assert_not_dirty( struct intel_context
*intel
)
859 struct i830_context
*i830
= i830_context(&intel
->ctx
);
860 assert(!get_dirty(&i830
->state
));
865 i830_invalidate_state(struct intel_context
*intel
, GLuint new_state
)
867 struct gl_context
*ctx
= &intel
->ctx
;
869 _swsetup_InvalidateState(ctx
, new_state
);
870 _tnl_InvalidateState(ctx
, new_state
);
871 _tnl_invalidate_vertex_state(ctx
, new_state
);
873 if (new_state
& _NEW_LIGHT
)
874 i830_update_provoking_vertex(&intel
->ctx
);
878 i830InitVtbl(struct i830_context
*i830
)
880 i830
->intel
.vtbl
.check_vertex_size
= i830_check_vertex_size
;
881 i830
->intel
.vtbl
.destroy
= i830_destroy_context
;
882 i830
->intel
.vtbl
.emit_state
= i830_emit_state
;
883 i830
->intel
.vtbl
.new_batch
= i830_new_batch
;
884 i830
->intel
.vtbl
.reduced_primitive_state
= i830_reduced_primitive_state
;
885 i830
->intel
.vtbl
.set_draw_region
= i830_set_draw_region
;
886 i830
->intel
.vtbl
.update_draw_buffer
= i830_update_draw_buffer
;
887 i830
->intel
.vtbl
.update_texture_state
= i830UpdateTextureState
;
888 i830
->intel
.vtbl
.render_start
= i830_render_start
;
889 i830
->intel
.vtbl
.render_prevalidate
= i830_render_prevalidate
;
890 i830
->intel
.vtbl
.assert_not_dirty
= i830_assert_not_dirty
;
891 i830
->intel
.vtbl
.finish_batch
= intel_finish_vb
;
892 i830
->intel
.vtbl
.invalidate_state
= i830_invalidate_state
;
893 i830
->intel
.vtbl
.render_target_supported
= i830_render_target_supported
;