2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
#include <assert.h>
#include <string.h>

#include "intel_batchbuffer.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"

#include "brw_state.h"
#include "brw_fallback.h"
55 /* called from intelDestroyContext()
57 static void brw_destroy_context( struct intel_context
*intel
)
59 GLcontext
*ctx
= &intel
->ctx
;
60 struct brw_context
*brw
= brw_context(&intel
->ctx
);
62 brw_destroy_metaops(brw
);
63 brw_destroy_state(brw
);
64 brw_draw_destroy( brw
);
66 brw_ProgramCacheDestroy( ctx
);
67 brw_FrameBufferTexDestroy( brw
);
70 /* called from intelDrawBuffer()
72 static void brw_set_draw_region( struct intel_context
*intel
,
73 struct intel_region
*draw_regions
[],
74 struct intel_region
*depth_region
,
77 struct brw_context
*brw
= brw_context(&intel
->ctx
);
79 if (brw
->state
.depth_region
!= depth_region
)
80 brw
->state
.dirty
.brw
|= BRW_NEW_DEPTH_BUFFER
;
81 for (i
= 0; i
< brw
->state
.nr_draw_regions
; i
++)
82 intel_region_release(&brw
->state
.draw_regions
[i
]);
83 intel_region_release(&brw
->state
.depth_region
);
84 for (i
= 0; i
< num_regions
; i
++)
85 intel_region_reference(&brw
->state
.draw_regions
[i
], draw_regions
[i
]);
86 intel_region_reference(&brw
->state
.depth_region
, depth_region
);
87 brw
->state
.nr_draw_regions
= num_regions
;
91 /* called from intelFlushBatchLocked
93 static void brw_new_batch( struct intel_context
*intel
)
95 struct brw_context
*brw
= brw_context(&intel
->ctx
);
97 /* Check that we didn't just wrap our batchbuffer at a bad time. */
98 assert(!brw
->no_batch_wrap
);
100 dri_bo_unreference(brw
->curbe
.curbe_bo
);
101 brw
->curbe
.curbe_bo
= NULL
;
103 /* Mark all context state as needing to be re-emitted.
104 * This is probably not as severe as on 915, since almost all of our state
105 * is just in referenced buffers.
107 brw
->state
.dirty
.brw
|= BRW_NEW_CONTEXT
;
109 brw
->state
.dirty
.mesa
|= ~0;
110 brw
->state
.dirty
.brw
|= ~0;
111 brw
->state
.dirty
.cache
|= ~0;
113 /* Move to the end of the current upload buffer so that we'll force choosing
114 * a new buffer next time.
116 if (brw
->vb
.upload
.bo
!= NULL
) {
117 dri_bo_unreference(brw
->vb
.upload
.bo
);
118 brw
->vb
.upload
.bo
= NULL
;
119 brw
->vb
.upload
.offset
= 0;
123 static void brw_note_fence( struct intel_context
*intel
,
126 brw_context(&intel
->ctx
)->state
.dirty
.brw
|= BRW_NEW_FENCE
;
129 static void brw_note_unlock( struct intel_context
*intel
)
131 struct brw_context
*brw
= brw_context(&intel
->ctx
);
133 brw_state_cache_check_size(brw
);
135 brw_context(&intel
->ctx
)->state
.dirty
.brw
|= BRW_NEW_LOCK
;
139 void brw_do_flush( struct brw_context
*brw
,
142 struct brw_mi_flush flush
;
143 memset(&flush
, 0, sizeof(flush
));
144 flush
.opcode
= CMD_MI_FLUSH
;
146 BRW_BATCH_STRUCT(brw
, &flush
);
150 static void brw_emit_flush( struct intel_context
*intel
,
153 brw_do_flush(brw_context(&intel
->ctx
),
154 BRW_FLUSH_STATE_CACHE
|BRW_FLUSH_READ_CACHE
);
158 /* called from intelWaitForIdle() and intelFlush()
160 * For now, just flush everything. Could be smarter later.
162 static GLuint
brw_flush_cmd( void )
164 struct brw_mi_flush flush
;
165 flush
.opcode
= CMD_MI_FLUSH
;
167 flush
.flags
= BRW_FLUSH_READ_CACHE
| BRW_FLUSH_STATE_CACHE
;
168 return *(GLuint
*)&flush
;
171 static void brw_invalidate_state( struct intel_context
*intel
, GLuint new_state
)
177 void brwInitVtbl( struct brw_context
*brw
)
179 brw
->intel
.vtbl
.check_vertex_size
= 0;
180 brw
->intel
.vtbl
.emit_state
= 0;
181 brw
->intel
.vtbl
.reduced_primitive_state
= 0;
182 brw
->intel
.vtbl
.render_start
= 0;
183 brw
->intel
.vtbl
.update_texture_state
= 0;
185 brw
->intel
.vtbl
.invalidate_state
= brw_invalidate_state
;
186 brw
->intel
.vtbl
.note_fence
= brw_note_fence
;
187 brw
->intel
.vtbl
.note_unlock
= brw_note_unlock
;
188 brw
->intel
.vtbl
.new_batch
= brw_new_batch
;
189 brw
->intel
.vtbl
.destroy
= brw_destroy_context
;
190 brw
->intel
.vtbl
.set_draw_region
= brw_set_draw_region
;
191 brw
->intel
.vtbl
.flush_cmd
= brw_flush_cmd
;
192 brw
->intel
.vtbl
.emit_flush
= brw_emit_flush
;
193 brw
->intel
.vtbl
.debug_batch
= brw_debug_batch
;