i965g: more files compiling
[mesa.git] / src / gallium / drivers / i965 / brw_pipe_flush.c

/* Assumed includes for this translation unit (not shown in the snippet):
 * brw_context.h for struct brw_context and the dirty-state flags, and
 * brw_batchbuffer.h for the batchbuffer interface.
 */
#include "brw_context.h"
#include "brw_batchbuffer.h"


/**
 * called from brw_batchbuffer_flush and children before sending a
 * batchbuffer off.
 */
static void brw_finish_batch(struct intel_context *intel)
{
   struct brw_context *brw = brw_context(&intel->ctx);
   brw_emit_query_end(brw);
}


/**
 * called from intelFlushBatchLocked
 */
static void brw_new_batch( struct brw_context *brw )
{
   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!brw->no_batch_wrap);

   brw->curbe.need_new_bo = GL_TRUE;

   /* Mark all context state as needing to be re-emitted.
    * This is probably not as severe as on 915, since almost all of our state
    * is just in referenced buffers.
    */
   brw->state.dirty.brw |= BRW_NEW_CONTEXT;

   brw->state.dirty.mesa |= ~0;
   brw->state.dirty.brw |= ~0;
   brw->state.dirty.cache |= ~0;

   /* Move to the end of the current upload buffer so that we'll force choosing
    * a new buffer next time.
    */
   if (brw->vb.upload.bo != NULL) {
      brw->sws->bo_unreference(brw->vb.upload.bo);
      brw->vb.upload.bo = NULL;
      brw->vb.upload.offset = 0;
   }
}
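
/*
 * Illustrative sketch (not part of the original file): how the ~0
 * invalidation in brw_new_batch() is meant to be consumed.  A state-emit
 * loop compares the accumulated dirty bits against the bits each state
 * atom cares about and re-emits only on overlap; after brw_new_batch()
 * every atom matches, forcing a full re-emit into the new batch.  The
 * struct and helper below are hypothetical stand-ins, not the driver's
 * real types.
 */
struct example_state_flags {
   unsigned mesa;               /* mirrors brw->state.dirty.mesa  */
   unsigned brw;                /* mirrors brw->state.dirty.brw   */
   unsigned cache;              /* mirrors brw->state.dirty.cache */
};

static unsigned
example_check_state(const struct example_state_flags *dirty,
                    const struct example_state_flags *atom_interest)
{
   /* Non-zero when any bit the atom listens for is currently dirty;
    * with all three fields set to ~0 this is non-zero for every atom. */
   return (dirty->mesa  & atom_interest->mesa)  |
          (dirty->brw   & atom_interest->brw)   |
          (dirty->cache & atom_interest->cache);
}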


static void brw_note_fence( struct brw_context *brw, GLuint fence )
{
   brw->state.dirty.brw |= BRW_NEW_FENCE;
}

/* called from intelWaitForIdle() and intelFlush()
 *
 * For now, just flush everything. Could be smarter later.
 */
static GLuint brw_flush_cmd( void )
{
   struct brw_mi_flush flush;
   flush.opcode = CMD_MI_FLUSH;
   flush.pad = 0;
   flush.flags = BRW_FLUSH_STATE_CACHE;
   return *(GLuint *)&flush;
}
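
/*
 * Illustrative sketch (not part of the original file): brw_flush_cmd()
 * above relies on struct brw_mi_flush packing into exactly one 32-bit
 * dword so the command can be returned through a GLuint and written
 * straight into the command stream.  The stand-in below shows the same
 * packing with a memcpy instead of the pointer cast; the field widths
 * and the example_* names are hypothetical, only the dword-sized layout
 * matters here.  The includes are for this sketch only.
 */
#include <stdint.h>
#include <string.h>

struct example_mi_flush {
   uint32_t flags  : 4;    /* flush control bits (state cache, etc.) */
   uint32_t pad    : 12;   /* reserved bits, kept zero               */
   uint32_t opcode : 16;   /* command opcode                         */
};

static uint32_t
example_flush_dword(uint32_t opcode, uint32_t flags)
{
   struct example_mi_flush flush = { 0, 0, 0 };
   uint32_t dword;

   flush.opcode = opcode;
   flush.flags  = flags;

   /* Copy the packed bits out as a single dword, which is what the
    * *(GLuint *)&flush cast in brw_flush_cmd() does in one step. */
   memcpy(&dword, &flush, sizeof(dword));
   return dword;
}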