src/mesa/drivers/dri/i965/brw_vtbl.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/colormac.h"

#include "intel_batchbuffer.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "brw_draw.h"
#include "brw_fallback.h"
#include "brw_vs.h"


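/**
 * Unreference a buffer object and clear the caller's pointer.
 */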
static void
dri_bo_release(dri_bo **bo)
{
   dri_bo_unreference(*bo);
   *bo = NULL;
}


/**
 * Called from intelDestroyContext(): release all hardware state and the
 * buffer objects owned by the context.
 */
static void brw_destroy_context( struct intel_context *intel )
{
   struct brw_context *brw = brw_context(&intel->ctx);
   int i;

   brw_destroy_state(brw);
   brw_draw_destroy( brw );

   _mesa_free(brw->wm.compile_data);

   brw_FrameBufferTexDestroy( brw );

   for (i = 0; i < brw->state.nr_draw_regions; i++)
      intel_region_release(&brw->state.draw_regions[i]);
   brw->state.nr_draw_regions = 0;
   intel_region_release(&brw->state.depth_region);

   dri_bo_release(&brw->curbe.curbe_bo);
   dri_bo_release(&brw->vs.prog_bo);
   dri_bo_release(&brw->vs.state_bo);
   dri_bo_release(&brw->gs.prog_bo);
   dri_bo_release(&brw->gs.state_bo);
   dri_bo_release(&brw->clip.prog_bo);
   dri_bo_release(&brw->clip.state_bo);
   dri_bo_release(&brw->clip.vp_bo);
   dri_bo_release(&brw->sf.prog_bo);
   dri_bo_release(&brw->sf.state_bo);
   dri_bo_release(&brw->sf.vp_bo);
   for (i = 0; i < BRW_MAX_TEX_UNIT; i++)
      dri_bo_release(&brw->wm.sdc_bo[i]);
   dri_bo_release(&brw->wm.bind_bo);
   for (i = 0; i < BRW_WM_MAX_SURF; i++)
      dri_bo_release(&brw->wm.surf_bo[i]);
   dri_bo_release(&brw->wm.sampler_bo);
   dri_bo_release(&brw->wm.prog_bo);
   dri_bo_release(&brw->wm.state_bo);
   dri_bo_release(&brw->cc.prog_bo);
   dri_bo_release(&brw->cc.state_bo);
   dri_bo_release(&brw->cc.vp_bo);
}


/**
 * Called from intelDrawBuffer(): update the color and depth regions the
 * hardware renders to, flagging BRW_NEW_DEPTH_BUFFER if the depth region
 * changed.
 */
static void brw_set_draw_region( struct intel_context *intel,
                                 struct intel_region *draw_regions[],
                                 struct intel_region *depth_region,
                                 GLuint num_regions)
{
   struct brw_context *brw = brw_context(&intel->ctx);
   int i;

   /* release old color/depth regions */
   if (brw->state.depth_region != depth_region)
      brw->state.dirty.brw |= BRW_NEW_DEPTH_BUFFER;
   for (i = 0; i < brw->state.nr_draw_regions; i++)
      intel_region_release(&brw->state.draw_regions[i]);
   intel_region_release(&brw->state.depth_region);

   /* reference new color/depth regions */
   for (i = 0; i < num_regions; i++)
      intel_region_reference(&brw->state.draw_regions[i], draw_regions[i]);
   intel_region_reference(&brw->state.depth_region, depth_region);
   brw->state.nr_draw_regions = num_regions;
}


/**
 * called from intel_batchbuffer_flush and children before sending a
 * batchbuffer off.
 */
static void brw_finish_batch(struct intel_context *intel)
{
   struct brw_context *brw = brw_context(&intel->ctx);
   brw_emit_query_end(brw);
}


/**
 * called from intelFlushBatchLocked
 */
static void brw_new_batch( struct intel_context *intel )
{
   struct brw_context *brw = brw_context(&intel->ctx);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!brw->no_batch_wrap);

   brw->curbe.need_new_bo = GL_TRUE;

   /* Mark all context state as needing to be re-emitted.
    * This is probably not as severe as on 915, since almost all of our state
    * is just in referenced buffers.
    */
   brw->state.dirty.brw |= BRW_NEW_CONTEXT;

   brw->state.dirty.mesa |= ~0;
   brw->state.dirty.brw |= ~0;
   brw->state.dirty.cache |= ~0;

   /* Release the vertex upload buffer so a new one will be chosen for the
    * next batch.
    */
   if (brw->vb.upload.bo != NULL) {
      dri_bo_unreference(brw->vb.upload.bo);
      brw->vb.upload.bo = NULL;
      brw->vb.upload.offset = 0;
   }
}


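/**
 * vtbl hook: note that a fence was emitted by flagging BRW_NEW_FENCE in
 * the dirty-state bits.
 */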
static void brw_note_fence( struct intel_context *intel, GLuint fence )
{
   brw_context(&intel->ctx)->state.dirty.brw |= BRW_NEW_FENCE;
}


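/**
 * vtbl hook: the hardware lock was released; check whether the state
 * cache has grown too large.
 */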
static void brw_note_unlock( struct intel_context *intel )
{
   struct brw_context *brw = brw_context(&intel->ctx);
   brw_state_cache_check_size(brw);
}


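/**
 * Emit an MI_FLUSH command with the given flags into the batchbuffer.
 */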
void brw_do_flush( struct brw_context *brw, GLuint flags )
{
   struct brw_mi_flush flush;
   memset(&flush, 0, sizeof(flush));
   flush.opcode = CMD_MI_FLUSH;
   flush.flags = flags;
   BRW_BATCH_STRUCT(brw, &flush);
}


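/**
 * vtbl hook: flush the hardware read and state caches.
 */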
static void brw_emit_flush( struct intel_context *intel, GLuint unused )
{
   brw_do_flush(brw_context(&intel->ctx),
                BRW_FLUSH_STATE_CACHE|BRW_FLUSH_READ_CACHE);
}


/* called from intelWaitForIdle() and intelFlush()
 *
 * For now, just flush everything. Could be smarter later.
 */
static GLuint brw_flush_cmd( void )
{
   struct brw_mi_flush flush;
   flush.opcode = CMD_MI_FLUSH;
   flush.pad = 0;
   flush.flags = BRW_FLUSH_READ_CACHE | BRW_FLUSH_STATE_CACHE;
   return *(GLuint *)&flush;
}


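/**
 * vtbl hook: nothing to do here; the driver tracks its own dirty state
 * in brw->state.dirty.
 */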
static void brw_invalidate_state( struct intel_context *intel, GLuint new_state )
{
   /* nothing */
}


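/**
 * Plug the i965-specific implementations into the intel_context vtbl.
 * Hooks the i965 driver does not need are left as 0.
 */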
void brwInitVtbl( struct brw_context *brw )
{
   brw->intel.vtbl.check_vertex_size = 0;
   brw->intel.vtbl.emit_state = 0;
   brw->intel.vtbl.reduced_primitive_state = 0;
   brw->intel.vtbl.render_start = 0;
   brw->intel.vtbl.update_texture_state = 0;

   brw->intel.vtbl.invalidate_state = brw_invalidate_state;
   brw->intel.vtbl.note_fence = brw_note_fence;
   brw->intel.vtbl.note_unlock = brw_note_unlock;
   brw->intel.vtbl.new_batch = brw_new_batch;
   brw->intel.vtbl.finish_batch = brw_finish_batch;
   brw->intel.vtbl.destroy = brw_destroy_context;
   brw->intel.vtbl.set_draw_region = brw_set_draw_region;
   brw->intel.vtbl.flush_cmd = brw_flush_cmd;
   brw->intel.vtbl.emit_flush = brw_emit_flush;
   brw->intel.vtbl.debug_batch = brw_debug_batch;
}