/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/colormac.h"

#include "intel_batchbuffer.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"

#include "brw_draw.h"
#include "brw_fallback.h"
#include "brw_vs.h"
#include <stdarg.h>

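/* Drop a reference on a buffer object and clear the caller's pointer so a
 * stale handle can never be reused.
 */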
static void
dri_bo_release(dri_bo **bo)
{
   dri_bo_unreference(*bo);
   *bo = NULL;
}

/* called from intelDestroyContext()
 */
static void brw_destroy_context( struct intel_context *intel )
{
   struct brw_context *brw = brw_context(&intel->ctx);
   int i;

   brw_destroy_metaops(brw);
   brw_destroy_state(brw);
   brw_draw_destroy( brw );

   brw_FrameBufferTexDestroy( brw );

   for (i = 0; i < brw->state.nr_draw_regions; i++)
      intel_region_release(&brw->state.draw_regions[i]);
   brw->state.nr_draw_regions = 0;
   intel_region_release(&brw->state.depth_region);

   dri_bo_release(&brw->curbe.curbe_bo);
   dri_bo_release(&brw->vs.prog_bo);
   dri_bo_release(&brw->vs.state_bo);
   dri_bo_release(&brw->gs.prog_bo);
   dri_bo_release(&brw->gs.state_bo);
   dri_bo_release(&brw->clip.prog_bo);
   dri_bo_release(&brw->clip.state_bo);
   dri_bo_release(&brw->clip.vp_bo);
   dri_bo_release(&brw->sf.prog_bo);
   dri_bo_release(&brw->sf.state_bo);
   dri_bo_release(&brw->sf.vp_bo);
   for (i = 0; i < BRW_MAX_TEX_UNIT; i++)
      dri_bo_release(&brw->wm.sdc_bo[i]);
   dri_bo_release(&brw->wm.bind_bo);
   for (i = 0; i < BRW_WM_MAX_SURF; i++)
      dri_bo_release(&brw->wm.surf_bo[i]);
   dri_bo_release(&brw->wm.prog_bo);
   dri_bo_release(&brw->wm.state_bo);
   dri_bo_release(&brw->cc.prog_bo);
   dri_bo_release(&brw->cc.state_bo);
   dri_bo_release(&brw->cc.vp_bo);
}

/* called from intelDrawBuffer()
 */
static void brw_set_draw_region( struct intel_context *intel,
                                 struct intel_region *draw_regions[],
                                 struct intel_region *depth_region,
                                 GLuint num_regions)
{
   struct brw_context *brw = brw_context(&intel->ctx);
   GLuint i;

   /* release the old color/depth regions */
   if (brw->state.depth_region != depth_region)
      brw->state.dirty.brw |= BRW_NEW_DEPTH_BUFFER;
   for (i = 0; i < brw->state.nr_draw_regions; i++)
      intel_region_release(&brw->state.draw_regions[i]);
   intel_region_release(&brw->state.depth_region);

   /* reference the new color/depth regions */
   for (i = 0; i < num_regions; i++)
      intel_region_reference(&brw->state.draw_regions[i], draw_regions[i]);
   intel_region_reference(&brw->state.depth_region, depth_region);
   brw->state.nr_draw_regions = num_regions;
}

/* called from intel_batchbuffer_flush and children before sending a
 * batchbuffer off.
 */
static void brw_finish_batch(struct intel_context *intel)
{
   struct brw_context *brw = brw_context(&intel->ctx);

   brw_emit_query_end(brw);
}

/* called from intelFlushBatchLocked
 */
static void brw_new_batch( struct intel_context *intel )
{
   struct brw_context *brw = brw_context(&intel->ctx);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!brw->no_batch_wrap);

   brw->curbe.need_new_bo = GL_TRUE;

   /* Mark all context state as needing to be re-emitted.
    * This is probably not as severe as on 915, since almost all of our state
    * is just in referenced buffers.
    */
   brw->state.dirty.brw |= BRW_NEW_CONTEXT;

   brw->state.dirty.mesa |= ~0;
   brw->state.dirty.brw |= ~0;
   brw->state.dirty.cache |= ~0;

   /* Release the vertex upload buffer so that a fresh one is allocated
    * for the next batch.
    */
   if (brw->vb.upload.bo != NULL) {
      dri_bo_unreference(brw->vb.upload.bo);
      brw->vb.upload.bo = NULL;
      brw->vb.upload.offset = 0;
   }
}

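/* Note that a new fence has been emitted; flag fence-dependent state so it
 * is re-validated before the next draw.
 */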
static void brw_note_fence( struct intel_context *intel, GLuint fence )
{
   brw_context(&intel->ctx)->state.dirty.brw |= BRW_NEW_FENCE;
}

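/* Note that the hardware lock has been released: prune the state cache if
 * it has grown too large and flag any state that depends on the lock.
 */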
static void brw_note_unlock( struct intel_context *intel )
{
   struct brw_context *brw = brw_context(&intel->ctx);

   brw_state_cache_check_size(brw);

   brw->state.dirty.brw |= BRW_NEW_LOCK;
}

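/* Emit an MI_FLUSH command with the given flags into the current
 * batchbuffer.
 */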
void brw_do_flush( struct brw_context *brw,
                   GLuint flags )
{
   struct brw_mi_flush flush;
   memset(&flush, 0, sizeof(flush));
   flush.opcode = CMD_MI_FLUSH;
   flush.flags = flags;
   BRW_BATCH_STRUCT(brw, &flush);
}

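/* vtbl hook: flush the hardware read and state caches.
 */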
static void brw_emit_flush( struct intel_context *intel,
                            GLuint unused )
{
   brw_do_flush(brw_context(&intel->ctx),
                BRW_FLUSH_STATE_CACHE | BRW_FLUSH_READ_CACHE);
}

/* called from intelWaitForIdle() and intelFlush()
 *
 * For now, just flush everything.  Could be smarter later.
 */
static GLuint brw_flush_cmd( void )
{
   struct brw_mi_flush flush;
   flush.opcode = CMD_MI_FLUSH;
   flush.pad = 0;
   flush.flags = BRW_FLUSH_READ_CACHE | BRW_FLUSH_STATE_CACHE;
   return *(GLuint *)&flush;
}

static void brw_invalidate_state( struct intel_context *intel, GLuint new_state )
{
   /* nothing */
}

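/* Hook the i965-specific implementations into the generic intel_context
 * vtable.  Entries left at 0 are not used by this driver.
 */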
void brwInitVtbl( struct brw_context *brw )
{
   brw->intel.vtbl.check_vertex_size = 0;
   brw->intel.vtbl.emit_state = 0;
   brw->intel.vtbl.reduced_primitive_state = 0;
   brw->intel.vtbl.render_start = 0;
   brw->intel.vtbl.update_texture_state = 0;

   brw->intel.vtbl.invalidate_state = brw_invalidate_state;
   brw->intel.vtbl.note_fence = brw_note_fence;
   brw->intel.vtbl.note_unlock = brw_note_unlock;
   brw->intel.vtbl.new_batch = brw_new_batch;
   brw->intel.vtbl.finish_batch = brw_finish_batch;
   brw->intel.vtbl.destroy = brw_destroy_context;
   brw->intel.vtbl.set_draw_region = brw_set_draw_region;
   brw->intel.vtbl.flush_cmd = brw_flush_cmd;
   brw->intel.vtbl.emit_flush = brw_emit_flush;
   brw->intel.vtbl.debug_batch = brw_debug_batch;
}