1 /**************************************************************************
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
36 #include "tnl/t_context.h"
37 #include "tnl/t_vertex.h"
39 #include "intel_batchbuffer.h"
40 #include "intel_tex.h"
41 #include "intel_regions.h"
44 #include "i915_context.h"
/* vtbl hook run before render validation: revalidates the i915
 * fragment-program state for this context.
 * NOTE(review): this chunk is mangled — statements are split across
 * physical lines and intermediate source lines (braces, storage
 * class/return type) are missing. Code kept byte-identical.
 */
47 i915_render_prevalidate(struct intel_context
*intel
)
49 struct i915_context
*i915
= i915_context(&intel
->ctx
);
/* Revalidate the fragment program against current GL state. */
51 i915ValidateFragmentProgram(i915
);
/* vtbl hook called at the start of rendering.
 * NOTE(review): only the signature is visible in this chunk; the body
 * (if any) is missing from view — do not infer behavior from here.
 */
55 i915_render_start(struct intel_context
*intel
)
/* vtbl hook: called when TNL's reduced primitive (points/lines/tris)
 * changes. Records the new reduced primitive and, if the computed ST1
 * stipple-enable word differs from the current hardware state, flushes
 * pending vertices and marks the stipple state for re-upload.
 * NOTE(review): mangled chunk — the enclosing switch statement and the
 * lines that modify st1 are missing from view; comments below are
 * hedged accordingly. Code kept byte-identical.
 */
61 i915_reduced_primitive_state(struct intel_context
*intel
, GLenum rprim
)
63 struct i915_context
*i915
= i915_context(&intel
->ctx
);
/* Snapshot the current ST1 stipple register word so a change can be
 * detected below. */
64 GLuint st1
= i915
->state
.Stipple
[I915_STPREG_ST1
];
/* Presumably part of a switch (rprim) — the switch head is not
 * visible in this chunk; TODO confirm against full source. */
69 case GL_QUADS
: /* from RASTERIZE(GL_QUADS) in t_dd_tritemp.h */
71 if (intel
->ctx
.Polygon
.StippleFlag
&& intel
->hw_stipple
)
/* Remember the reduced primitive for later draw paths. */
80 i915
->intel
.reduced_primitive
= rprim
;
/* If the stipple-enable word changed, flush queued vertices before
 * the state change and flag the stipple block dirty. */
82 if (st1
!= i915
->state
.Stipple
[I915_STPREG_ST1
]) {
83 INTEL_FIREVERTICES(intel
);
85 I915_STATECHANGE(i915
, I915_UPLOAD_STIPPLE
);
86 i915
->state
.Stipple
[I915_STPREG_ST1
] = st1
;
91 /* Pull apart the vertex format registers and figure out how large a
92 * vertex is supposed to be.
/* vtbl hook: decodes the LIS2/LIS4 vertex-format registers from the
 * current hardware context, accumulates the implied vertex size in
 * dwords, and returns whether it matches `expected`.
 * NOTE(review): mangled chunk — the `sz` initialization, most
 * switch-case bodies (the actual size increments) and several braces
 * are missing from view. Code kept byte-identical.
 */
95 i915_check_vertex_size(struct intel_context
*intel
, GLuint expected
)
97 struct i915_context
*i915
= i915_context(&intel
->ctx
);
98 int lis2
= i915
->current
->Ctx
[I915_CTXREG_LIS2
];
99 int lis4
= i915
->current
->Ctx
[I915_CTXREG_LIS4
];
/* Position format (XY/XYZ/XYZW/…) — case bodies not visible here. */
102 switch (lis4
& S4_VFMT_XYZW_MASK
) {
116 fprintf(stderr
, "no xyzw specified\n");
/* Each optional vertex attribute enabled in LIS4 presumably adds to
 * the running size — the increments themselves are missing from view. */
120 if (lis4
& S4_VFMT_SPEC_FOG
)
122 if (lis4
& S4_VFMT_COLOR
)
124 if (lis4
& S4_VFMT_DEPTH_OFFSET
)
126 if (lis4
& S4_VFMT_POINT_WIDTH
)
128 if (lis4
& S4_VFMT_FOG_PARAM
)
/* Walk all 8 texture-coordinate slots encoded in LIS2, low bits
 * first; each iteration shifts the next format field into place. */
131 for (i
= 0; i
< 8; i
++) {
132 switch (lis2
& S2_TEXCOORD_FMT0_MASK
) {
145 case TEXCOORDFMT_2D_16
:
148 case TEXCOORDFMT_4D_16
:
151 case TEXCOORDFMT_NOT_PRESENT
:
154 fprintf(stderr
, "bad texcoord fmt %d\n", i
);
157 lis2
>>= S2_TEXCOORD_FMT1_SHIFT
;
/* Diagnostic presumably emitted only on mismatch (guarding `if` not
 * visible in this chunk). */
161 fprintf(stderr
, "vertex size mismatch %d/%d\n", sz
, expected
);
163 return sz
== expected
;
/* Emits the hardware state that never changes for the lifetime of the
 * context ("invarient" spelling is historical and mirrored in the
 * I915_UPLOAD_INVARIENT flag): AA defaults, default diffuse/specular/Z
 * colors, coord-set bindings, raster rules, S3 immediate state,
 * scissor disable, depth-subrect disable, indirect-state disable and
 * two-sided-stencil disable.
 * NOTE(review): mangled chunk — the BEGIN_BATCH/ADVANCE_BATCH framing
 * and some packet payload lines are missing from view. Code kept
 * byte-identical.
 */
168 i915_emit_invarient_state(struct intel_context
*intel
)
174 OUT_BATCH(_3DSTATE_AA_CMD
|
175 AA_LINE_ECAAR_WIDTH_ENABLE
|
176 AA_LINE_ECAAR_WIDTH_1_0
|
177 AA_LINE_REGION_WIDTH_ENABLE
| AA_LINE_REGION_WIDTH_1_0
);
179 OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD
);
182 OUT_BATCH(_3DSTATE_DFLT_SPEC_CMD
);
185 OUT_BATCH(_3DSTATE_DFLT_Z_CMD
);
188 /* Don't support texture crossbar yet */
189 OUT_BATCH(_3DSTATE_COORD_SET_BINDINGS
|
194 CSB_TCB(4, 4) | CSB_TCB(5, 5) | CSB_TCB(6, 6) | CSB_TCB(7, 7));
196 OUT_BATCH(_3DSTATE_RASTER_RULES_CMD
|
197 ENABLE_POINT_RASTER_RULE
|
198 OGL_POINT_RASTER_RULE
|
199 ENABLE_LINE_STRIP_PROVOKE_VRTX
|
200 ENABLE_TRI_FAN_PROVOKE_VRTX
|
201 LINE_STRIP_PROVOKE_VRTX(1) |
202 TRI_FAN_PROVOKE_VRTX(2) | ENABLE_TEXKILL_3D_4D
| TEXKILL_4D
);
204 /* Need to initialize this to zero.
206 OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1
| I1_LOAD_S(3) | (0));
210 OUT_BATCH(_3DSTATE_SCISSOR_ENABLE_CMD
| DISABLE_SCISSOR_RECT
);
212 OUT_BATCH(_3DSTATE_SCISSOR_RECT_0_CMD
);
216 OUT_BATCH(_3DSTATE_DEPTH_SUBRECT_DISABLE
);
218 OUT_BATCH(_3DSTATE_LOAD_INDIRECT
| 0); /* disable indirect state */
222 /* Don't support twosided stencil yet */
223 OUT_BATCH(_3DSTATE_BACKFACE_STENCIL_OPS
| BFO_ENABLE_STENCIL_TWO_SIDE
| 0);
/* Emit a block of already-built state dwords into the batchbuffer.
 *
 * intel: struct intel_context * (its ->batch is the target buffer)
 * state: pointer to the dword array to copy
 * size:  byte count to copy
 *
 * Every argument (and the member access on `intel`) is parenthesized:
 * standard function-like-macro hygiene so callers may pass compound
 * expressions without precedence surprises. Arguments are each
 * expanded exactly once, so no multiple-evaluation hazard is added.
 */
#define emit(intel, state, size) \
   intel_batchbuffer_data((intel)->batch, (state), (size), 0)
/* Computes the set of state groups that are active but not yet emitted
 * to the hardware. Contains the multitexture-hang workaround: if any
 * texture unit is dirty, all texture units are forced dirty by
 * clearing their "emitted" bits before recomputing.
 * NOTE(review): mangled chunk — the `dirty` declaration, the final
 * `return`, braces and the return-type line are missing from view.
 * Code kept byte-identical.
 */
234 get_dirty(struct i915_hw_state
*state
)
238 /* Workaround the multitex hang - if one texture unit state is
239 * modified, emit all texture units.
/* dirty = everything active that hasn't been emitted yet. */
241 dirty
= state
->active
& ~state
->emitted
;
/* Any dirty texture unit invalidates all of them; recompute so the
 * full set shows up as dirty. */
242 if (dirty
& I915_UPLOAD_TEX_ALL
)
243 state
->emitted
&= ~I915_UPLOAD_TEX_ALL
;
244 dirty
= state
->active
& ~state
->emitted
;
/* Computes an upper bound, in bytes, on the batchbuffer space the
 * currently dirty state will consume when emitted — used by
 * i915_do_emit_state() to reserve space up front so emission cannot
 * wrap the buffer mid-state.
 * NOTE(review): mangled chunk — the `sz` initialization, the
 * invariant-state size line, the `nr` accumulation and the final
 * return are missing from view. Code kept byte-identical.
 */
250 get_state_size(struct i915_hw_state
*state
)
252 GLuint dirty
= get_dirty(state
);
256 if (dirty
& I915_UPLOAD_INVARIENT
)
259 if (dirty
& I915_UPLOAD_CTX
)
260 sz
+= sizeof(state
->Ctx
);
262 if (dirty
& I915_UPLOAD_BUFFERS
)
263 sz
+= sizeof(state
->Buffer
);
265 if (dirty
& I915_UPLOAD_STIPPLE
)
266 sz
+= sizeof(state
->Stipple
);
268 if (dirty
& I915_UPLOAD_FOG
)
269 sz
+= sizeof(state
->Fog
);
/* Texture state: count dirty units (into `nr`, presumably — the
 * accumulation line is not visible), then charge two packets of
 * (2 header + 3*nr payload) dwords each (map + sampler state). */
271 if (dirty
& I915_UPLOAD_TEX_ALL
) {
273 for (i
= 0; i
< I915_TEX_UNITS
; i
++)
274 if (dirty
& I915_UPLOAD_TEX(i
))
277 sz
+= (2 + nr
* 3) * sizeof(GLuint
) * 2;
280 if (dirty
& I915_UPLOAD_CONSTANTS
)
281 sz
+= state
->ConstantSize
* sizeof(GLuint
);
283 if (dirty
& I915_UPLOAD_PROGRAM
)
284 sz
+= state
->ProgramSize
* sizeof(GLuint
);
290 /* Push the state into the sarea and/or texture memory.
/* Core state-emission routine: reserves batchbuffer space for all
 * dirty state, recomputes dirtiness (a flush during reservation can
 * dirty more state), then writes each dirty group — invariant state,
 * context registers, destination buffers (with relocations), stipple,
 * fog, texture map+sampler packets, program constants and the
 * fragment program — into the batchbuffer.
 * NOTE(review): mangled chunk — local declarations (dirty, i, nr),
 * several braces/ADVANCE_BATCH lines and some packet lines are
 * missing from view. Code kept byte-identical.
 */
293 i915_do_emit_state(struct intel_context
*intel
)
295 struct i915_context
*i915
= i915_context(&intel
->ctx
);
296 struct i915_hw_state
*state
= i915
->current
;
301 /* We don't hold the lock at this point, so want to make sure that
302 * there won't be a buffer wrap.
304 * It might be better to talk about explicit places where
305 * scheduling is allowed, rather than assume that it is whenever a
306 * batchbuffer fills up.
308 intel_batchbuffer_require_space(intel
->batch
, get_state_size(state
), 0);
310 /* Workaround. There are cases I haven't been able to track down
311 * where we aren't emitting a full state at the start of a new
312 * batchbuffer. This code spots that we are on a new batchbuffer
313 * and forces a full state emit no matter what.
315 * In the normal case state->emitted is already zero, this code is
316 * another set of checks to make sure it really is.
318 if (intel
->batch
->id
!= intel
->last_state_batch_id
||
319 intel
->batch
->map
== intel
->batch
->ptr
)
/* Re-reserve: forcing a full emit may have grown the size needed. */
322 intel_batchbuffer_require_space(intel
->batch
, get_state_size(state
), 0);
325 /* Do this here as we may have flushed the batchbuffer above,
326 * causing more state to be dirty!
328 dirty
= get_dirty(state
);
/* Mark everything we are about to emit as emitted, up front. */
329 state
->emitted
|= dirty
;
330 assert(get_dirty(state
) == 0);
/* First emit into a new batchbuffer must include the context block. */
332 if (intel
->batch
->id
!= intel
->last_state_batch_id
) {
333 assert(dirty
& I915_UPLOAD_CTX
);
334 intel
->last_state_batch_id
= intel
->batch
->id
;
337 if (INTEL_DEBUG
& DEBUG_STATE
)
338 fprintf(stderr
, "%s dirty: %x\n", __FUNCTION__
, dirty
);
340 if (dirty
& I915_UPLOAD_INVARIENT
) {
341 if (INTEL_DEBUG
& DEBUG_STATE
)
342 fprintf(stderr
, "I915_UPLOAD_INVARIENT:\n");
343 i915_emit_invarient_state(intel
);
346 if (dirty
& I915_UPLOAD_CTX
) {
347 if (INTEL_DEBUG
& DEBUG_STATE
)
348 fprintf(stderr
, "I915_UPLOAD_CTX:\n");
350 emit(intel
, state
->Ctx
, sizeof(state
->Ctx
));
/* Destination buffer setup: color buffer address words, then a
 * relocation against the draw region's buffer object. */
353 if (dirty
& I915_UPLOAD_BUFFERS
) {
354 if (INTEL_DEBUG
& DEBUG_STATE
)
355 fprintf(stderr
, "I915_UPLOAD_BUFFERS:\n");
356 BEGIN_BATCH(I915_DEST_SETUP_SIZE
+ 2, 0);
357 OUT_BATCH(state
->Buffer
[I915_DESTREG_CBUFADDR0
]);
358 OUT_BATCH(state
->Buffer
[I915_DESTREG_CBUFADDR1
]);
359 OUT_RELOC(state
->draw_region
->buffer
,
360 DRM_BO_FLAG_MEM_TT
| DRM_BO_FLAG_WRITE
,
361 state
->draw_region
->draw_offset
);
/* Depth buffer words are only emitted when a depth region exists. */
363 if (state
->depth_region
) {
364 OUT_BATCH(state
->Buffer
[I915_DESTREG_DBUFADDR0
]);
365 OUT_BATCH(state
->Buffer
[I915_DESTREG_DBUFADDR1
]);
366 OUT_RELOC(state
->depth_region
->buffer
,
367 DRM_BO_FLAG_MEM_TT
| DRM_BO_FLAG_WRITE
,
368 state
->depth_region
->draw_offset
);
371 OUT_BATCH(state
->Buffer
[I915_DESTREG_DV0
]);
372 OUT_BATCH(state
->Buffer
[I915_DESTREG_DV1
]);
373 OUT_BATCH(state
->Buffer
[I915_DESTREG_SENABLE
]);
374 OUT_BATCH(state
->Buffer
[I915_DESTREG_SR0
]);
375 OUT_BATCH(state
->Buffer
[I915_DESTREG_SR1
]);
376 OUT_BATCH(state
->Buffer
[I915_DESTREG_SR2
]);
380 if (dirty
& I915_UPLOAD_STIPPLE
) {
381 if (INTEL_DEBUG
& DEBUG_STATE
)
382 fprintf(stderr
, "I915_UPLOAD_STIPPLE:\n");
383 emit(intel
, state
->Stipple
, sizeof(state
->Stipple
));
386 if (dirty
& I915_UPLOAD_FOG
) {
387 if (INTEL_DEBUG
& DEBUG_STATE
)
388 fprintf(stderr
, "I915_UPLOAD_FOG:\n");
389 emit(intel
, state
->Fog
, sizeof(state
->Fog
));
392 /* Combine all the dirty texture state into a single command to
393 * avoid lockups on I915 hardware.
395 if (dirty
& I915_UPLOAD_TEX_ALL
) {
/* Count dirty texture units (into `nr`; the accumulation line is
 * not visible in this chunk). */
398 for (i
= 0; i
< I915_TEX_UNITS
; i
++)
399 if (dirty
& I915_UPLOAD_TEX(i
))
/* One _3DSTATE_MAP_STATE packet covering all dirty units: header,
 * unit-enable mask, then 3 dwords per unit (address + MS3 + MS4). */
402 BEGIN_BATCH(2 + nr
* 3, 0);
403 OUT_BATCH(_3DSTATE_MAP_STATE
| (3 * nr
));
404 OUT_BATCH((dirty
& I915_UPLOAD_TEX_ALL
) >> I915_UPLOAD_TEX_0_SHIFT
);
405 for (i
= 0; i
< I915_TEX_UNITS
; i
++)
406 if (dirty
& I915_UPLOAD_TEX(i
)) {
408 if (state
->tex_buffer
[i
]) {
409 OUT_RELOC(state
->tex_buffer
[i
],
410 DRM_BO_FLAG_MEM_TT
| DRM_BO_FLAG_READ
,
411 state
->tex_offset
[i
]);
/* Meta state stores a raw offset instead of a buffer object. */
413 else if (state
== &i915
->meta
) {
418 OUT_BATCH(state
->tex_offset
[i
]);
421 OUT_BATCH(state
->Tex
[i
][I915_TEXREG_MS3
]);
422 OUT_BATCH(state
->Tex
[i
][I915_TEXREG_MS4
]);
/* Matching _3DSTATE_SAMPLER_STATE packet: 3 dwords (SS2-SS4) per
 * dirty unit, same enable mask. */
426 BEGIN_BATCH(2 + nr
* 3, 0);
427 OUT_BATCH(_3DSTATE_SAMPLER_STATE
| (3 * nr
));
428 OUT_BATCH((dirty
& I915_UPLOAD_TEX_ALL
) >> I915_UPLOAD_TEX_0_SHIFT
);
429 for (i
= 0; i
< I915_TEX_UNITS
; i
++)
430 if (dirty
& I915_UPLOAD_TEX(i
)) {
431 OUT_BATCH(state
->Tex
[i
][I915_TEXREG_SS2
]);
432 OUT_BATCH(state
->Tex
[i
][I915_TEXREG_SS3
]);
433 OUT_BATCH(state
->Tex
[i
][I915_TEXREG_SS4
]);
438 if (dirty
& I915_UPLOAD_CONSTANTS
) {
439 if (INTEL_DEBUG
& DEBUG_STATE
)
440 fprintf(stderr
, "I915_UPLOAD_CONSTANTS:\n");
441 emit(intel
, state
->Constant
, state
->ConstantSize
* sizeof(GLuint
));
444 if (dirty
& I915_UPLOAD_PROGRAM
) {
445 if (state
->ProgramSize
) {
446 if (INTEL_DEBUG
& DEBUG_STATE
)
447 fprintf(stderr
, "I915_UPLOAD_PROGRAM:\n");
/* Program header's low 9 bits encode payload length; sanity-check
 * it against the stored size before emitting. */
449 assert((state
->Program
[0] & 0x1ff) + 2 == state
->ProgramSize
);
451 emit(intel
, state
->Program
, state
->ProgramSize
* sizeof(GLuint
));
452 if (INTEL_DEBUG
& DEBUG_STATE
)
453 i915_disassemble_program(state
->Program
, state
->ProgramSize
);
457 intel
->batch
->dirty_state
&= ~dirty
;
458 assert(get_dirty(state
) == 0);
/* vtbl hook: emits all dirty state, then retries once with a forced
 * full re-emit if a batchbuffer wrap during the first pass left state
 * dirty again.
 * NOTE(review): mangled chunk — braces and return-type line missing
 * from view. Code kept byte-identical.
 */
462 i915_emit_state(struct intel_context
*intel
)
464 struct i915_context
*i915
= i915_context(&intel
->ctx
);
466 i915_do_emit_state( intel
);
468 /* Second chance - catch batchbuffer wrap in the middle of state
469 * emit. This shouldn't happen but it has been observed in
472 if (get_dirty( i915
->current
)) {
473 /* Force a full re-emit if this happens.
475 i915
->current
->emitted
= 0;
476 i915_do_emit_state( intel
);
/* After at most two passes everything must be clean. */
479 assert(get_dirty(i915
->current
) == 0);
480 assert((intel
->batch
->dirty_state
& (1<<1)) == 0);
/* vtbl hook: context teardown. Drops the references held on every
 * texture unit's buffer object (NULLing the slot to prevent reuse)
 * and frees TNL vertex storage.
 * NOTE(review): mangled chunk — braces and return-type line missing
 * from view. Code kept byte-identical.
 */
484 i915_destroy_context(struct intel_context
*intel
)
487 struct i915_context
*i915
= i915_context(&intel
->ctx
);
489 for (i
= 0; i
< I915_TEX_UNITS
; i
++) {
490 if (i915
->state
.tex_buffer
[i
] != NULL
) {
491 dri_bo_unreference(i915
->state
.tex_buffer
[i
]);
/* Clear the slot after unreferencing to guard against a stale
 * pointer being emitted later. */
492 i915
->state
.tex_buffer
[i
] = NULL
;
496 _tnl_free_vertices(&intel
->ctx
);
501 * Set the drawing regions for the color and depth/stencil buffers.
502 * This involves setting the pitch, cpp and buffer ID/location.
503 * Also set pixel format for color and Z rendering
504 * Used for setting both regular and meta state.
/* NOTE(review): mangled chunk — braces, some buffer-format bit lines
 * (e.g. the color pixel-format and depth buffer-ID words) and the
 * `value` declaration are missing from view. Code kept byte-identical.
 */
507 i915_state_draw_region(struct intel_context
*intel
,
508 struct i915_hw_state
*state
,
509 struct intel_region
*color_region
,
510 struct intel_region
*depth_region
)
512 struct i915_context
*i915
= i915_context(&intel
->ctx
);
/* This routine services exactly the two hw_state instances the
 * context owns: regular state and meta state. */
515 ASSERT(state
== &i915
->state
|| state
== &i915
->meta
);
/* Swap region references only on change, releasing the old one. */
517 if (state
->draw_region
!= color_region
) {
518 intel_region_release(&state
->draw_region
);
519 intel_region_reference(&state
->draw_region
, color_region
);
521 if (state
->depth_region
!= depth_region
) {
522 intel_region_release(&state
->depth_region
);
523 intel_region_reference(&state
->depth_region
, depth_region
);
527 * Set stride/cpp values
/* Color buffer info packet: pitch is bytes per row (pixels * cpp). */
530 state
->Buffer
[I915_DESTREG_CBUFADDR0
] = _3DSTATE_BUF_INFO_CMD
;
531 state
->Buffer
[I915_DESTREG_CBUFADDR1
] =
532 (BUF_3D_ID_COLOR_BACK
|
533 BUF_3D_PITCH(color_region
->pitch
* color_region
->cpp
) |
/* Depth buffer info packet. */
538 state
->Buffer
[I915_DESTREG_DBUFADDR0
] = _3DSTATE_BUF_INFO_CMD
;
539 state
->Buffer
[I915_DESTREG_DBUFADDR1
] =
541 BUF_3D_PITCH(depth_region
->pitch
* depth_region
->cpp
) |
546 * Compute/set I915_DESTREG_DV1 value
548 value
= (DSTORG_HORT_BIAS(0x8) | /* .5 */
549 DSTORG_VERT_BIAS(0x8) | /* .5 */
550 LOD_PRECLAMP_OGL
| TEX_DEFAULT_COLOR_OGL
);
/* Pick color format from cpp: 32-bit path's format word is missing
 * from view; otherwise fall back to dithered 565. */
551 if (color_region
&& color_region
->cpp
== 4) {
555 value
|= (DITHER_FULL_ALWAYS
| DV_PF_565
);
/* Depth format from cpp: 24/8 for 4-byte Z, 16-bit fixed otherwise. */
557 if (depth_region
&& depth_region
->cpp
== 4) {
558 value
|= DEPTH_FRMT_24_FIXED_8_OTHER
;
561 value
|= DEPTH_FRMT_16_FIXED
;
563 state
->Buffer
[I915_DESTREG_DV1
] = value
;
/* Buffer state changed — schedule a re-upload. */
565 I915_STATECHANGE(i915
, I915_UPLOAD_BUFFERS
);
/* vtbl hook: thin wrapper that applies the draw regions to the
 * context's regular (non-meta) hardware state.
 * NOTE(review): mangled chunk — braces and return-type line missing
 * from view. Code kept byte-identical.
 */
570 i915_set_draw_region(struct intel_context
*intel
,
571 struct intel_region
*color_region
,
572 struct intel_region
*depth_region
)
574 struct i915_context
*i915
= i915_context(&intel
->ctx
);
575 i915_state_draw_region(intel
, &i915
->state
, color_region
, depth_region
);
/* vtbl hook: called when hardware state was lost (e.g. context
 * switch). Zeroing `emitted` forces a full state re-emit on the next
 * i915_emit_state() call.
 * NOTE(review): mangled chunk — braces and return-type line missing
 * from view. Code kept byte-identical.
 */
581 i915_lost_hardware(struct intel_context
*intel
)
583 struct i915_context
*i915
= i915_context(&intel
->ctx
);
584 i915
->state
.emitted
= 0;
/* NOTE(review): orphan statement — the enclosing function signature is
 * missing from this chunk. Given the vtbl assignment of i915_flush_cmd
 * below, this is presumably its body: the MI_FLUSH command word with
 * the map-cache flush bit set. Confirm against the full source.
 */
590 return MI_FLUSH
| FLUSH_MAP_CACHE
;
/* vtbl hook: debug check that no state is pending emission.
 * NOTE(review): mangled chunk — the assertion consuming `dirty` (and
 * the braces/return type) is missing from view; as shown, `dirty` is
 * computed but unused. Code kept byte-identical.
 */
594 i915_assert_not_dirty( struct intel_context
*intel
)
596 struct i915_context
*i915
= i915_context(&intel
->ctx
);
597 struct i915_hw_state
*state
= i915
->current
;
598 GLuint dirty
= get_dirty(state
);
/* Wires the i915-specific implementations into the shared intel
 * context vtable; called once at context creation.
 * NOTE(review): mangled chunk — braces and return-type line missing
 * from view. Code kept byte-identical.
 */
604 i915InitVtbl(struct i915_context
*i915
)
606 i915
->intel
.vtbl
.check_vertex_size
= i915_check_vertex_size
;
607 i915
->intel
.vtbl
.destroy
= i915_destroy_context
;
608 i915
->intel
.vtbl
.emit_state
= i915_emit_state
;
609 i915
->intel
.vtbl
.lost_hardware
= i915_lost_hardware
;
610 i915
->intel
.vtbl
.reduced_primitive_state
= i915_reduced_primitive_state
;
611 i915
->intel
.vtbl
.render_start
= i915_render_start
;
612 i915
->intel
.vtbl
.render_prevalidate
= i915_render_prevalidate
;
613 i915
->intel
.vtbl
.set_draw_region
= i915_set_draw_region
;
614 i915
->intel
.vtbl
.update_texture_state
= i915UpdateTextureState
;
615 i915
->intel
.vtbl
.flush_cmd
= i915_flush_cmd
;
616 i915
->intel
.vtbl
.assert_not_dirty
= i915_assert_not_dirty
;