#define BRW_NEW_PSP 0x800
#define BRW_NEW_METAOPS 0x1000
#define BRW_NEW_FENCE 0x2000
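+/* Set when the hardware lock is (re)acquired and a state pool may have
+ * wrapped: any state atom that embeds bmBufferOffset() results must be
+ * re-emitted, as those offsets may have changed.
+ */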
+#define BRW_NEW_LOCK 0x4000
GLuint primitive;
GLboolean emit_state_always;
-
GLboolean wrap;
+ GLboolean tmp_fallback;
struct {
struct brw_state_flags dirty;
} state;
struct brw_state_pointers attribs;
-
- GLboolean tmp_fallback;
-
struct brw_mem_pool pool[BRW_MAX_POOL];
struct brw_cache cache[BRW_MAX_CACHE];
struct brw_cached_batch_item *cached_batch_items;
bd.dword1.bits.tiled_surface = intel->depth_region->tiled;
bd.dword1.bits.surface_type = BRW_SURFACE_2D;
+ /* BRW_NEW_LOCK */
bd.dword2_base_addr = bmBufferOffset(intel, region->buffer);
bd.dword3.bits.mipmap_layout = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
const struct brw_tracked_state brw_depthbuffer = {
.dirty = {
.mesa = 0,
- .brw = BRW_NEW_CONTEXT | BRW_NEW_FENCE,
+ .brw = BRW_NEW_CONTEXT | BRW_NEW_LOCK,
.cache = 0
},
.update = upload_depthbuffer
sba.header.opcode = CMD_STATE_BASE_ADDRESS;
sba.header.length = 0x4;
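+ /* The base addresses below are 32-byte aligned and the packet takes
+ * them in 32-byte units, hence the >> 5.
+ */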
+ /* BRW_NEW_LOCK */
sba.bits0.general_state_address =
   bmBufferOffset(intel, brw->pool[BRW_GS_POOL].buffer) >> 5;
sba.bits0.modify_enable = 1;
+ /* BRW_NEW_LOCK */
sba.bits1.surface_state_address =
   bmBufferOffset(intel, brw->pool[BRW_SS_POOL].buffer) >> 5;
sba.bits1.modify_enable = 1;
const struct brw_tracked_state brw_state_base_address = {
.dirty = {
.mesa = 0,
- .brw = BRW_NEW_CONTEXT | BRW_NEW_FENCE,
+ .brw = BRW_NEW_CONTEXT | BRW_NEW_LOCK,
.cache = 0
},
.update = upload_state_base_address
brw_pool_check_wrap(brw, &brw->pool[BRW_GS_POOL]);
brw_pool_check_wrap(brw, &brw->pool[BRW_SS_POOL]);
+
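+ /* The pools may have wrapped, invalidating any offsets previously
+ * returned by bmBufferOffset(); flag dependent state for re-emission.
+ */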
+ brw_context(&intel->ctx)->state.dirty.brw |= BRW_NEW_LOCK;
}
if (INTEL_DEBUG & DEBUG_WM) {
_mesa_printf("\n\n\npre-fp:\n");
-/* _mesa_print_program(&fp->program); */
+ _mesa_print_program(&fp->program.Base);
_mesa_printf("\n");
}
assert(per_thread <= 12 * 1024);
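+ /* Scratch space is managed in 1KB units: the field below holds
+ * (size in KB) - 1, and the base pointer is likewise programmed in
+ * 1KB units, hence the >> 10.
+ */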
wm.thread2.per_thread_scratch_space = (per_thread / 1024) - 1;
+
+ /* XXX: could make this dynamic as this is so rarely active:
+ */
+ /* BRW_NEW_LOCK */
wm.thread2.scratch_space_base_pointer =
bmBufferOffset(intel, brw->wm.scratch_buffer) >> 10;
}
.brw = (BRW_NEW_FRAGMENT_PROGRAM |
BRW_NEW_CURBE_OFFSETS |
- BRW_NEW_FENCE),
+ BRW_NEW_LOCK),
.cache = (CACHE_NEW_SURFACE |
CACHE_NEW_WM_PROG |
*/
/* surf->ss0.data_return_format = BRW_SURFACERETURNFORMAT_S1; */
+ /* BRW_NEW_LOCK */
surf->ss1.base_addr = bmBufferOffset(intel,
intelObj->mt->region->buffer);
surf.ss0.writedisable_blue = !brw->attribs.Color->ColorMask[2];
surf.ss0.writedisable_alpha = !brw->attribs.Color->ColorMask[3];
+ /* BRW_NEW_LOCK */
surf.ss1.base_addr = bmBufferOffset(&brw->intel, region->buffer);
.dirty = {
.mesa = _NEW_COLOR | _NEW_TEXTURE | _NEW_BUFFERS,
.brw = (BRW_NEW_CONTEXT |
- BRW_NEW_FENCE), /* required for bmBufferOffset */
+ BRW_NEW_LOCK), /* required for bmBufferOffset */
.cache = 0
},
.update = upload_wm_surfaces
{ "sing", DEBUG_SINGLE_THREAD },
{ "thre", DEBUG_SINGLE_THREAD },
{ "wm", DEBUG_WM },
+ { "vs", DEBUG_VS },
{ NULL, 0 }
};
/* Lost context?
*/
if (sarea->ctxOwner != me) {
- intel->perf_boxes |= I830_BOX_LOST_CONTEXT;
sarea->ctxOwner = me;
/* Should also fence the frontbuffer even if ctxOwner doesn't
intel->vtbl.lost_hardware( intel );
}
- /* Because the X server issues drawing commands without properly
- * fencing them, we need to be paranoid about waiting for hardware
- * rendering to finish after a contended lock.
- */
- intel->flushBeforeFallback = GL_TRUE;
-
/* Drawable changed?
*/
if (dPriv && intel->lastStamp != dPriv->lastStamp) {
GLboolean aub_wrap;
- GLboolean flushBeforeFallback;
-
struct intel_batchbuffer *batch;
struct {
GLboolean hw_stencil;
GLboolean hw_stipple;
GLboolean depth_buffer_is_float;
- GLboolean perf_boxes;
GLboolean no_hw;
GLboolean no_rast;
GLboolean thrashing;
#define DEBUG_SINGLE_THREAD 0x8000
#define DEBUG_WM 0x10000
#define DEBUG_URB 0x20000
+#define DEBUG_VS 0x40000
#define PCI_CHIP_845_G 0x2562
LOCK_HARDWARE(intel);
-#if 0
- if (intel->flushBeforeFallback) {
- intelFinish(&intel->ctx);
- intel->flushBeforeFallback = GL_FALSE;
- }
-#endif
-
/* Just map the framebuffer and all textures. Bufmgr code will
* take care of waiting on the necessary fences:
*/