intel: Remove the non-gem paths for batchbuffer upload.
[mesa.git] src/mesa/drivers/dri/intel/intel_batchbuffer.c
index ca8e34483685dcf6992741371845ddaa23cffe48..d34176236b7e8f1ed1d18bc66c4988d5d4fef6cc 100644
@@ -42,19 +42,12 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
       batch->buf = NULL;
    }
 
-   if (!batch->buffer)
-      batch->buffer = malloc (intel->maxBatchSize);
-
    batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                             intel->maxBatchSize, 4096);
-   if (batch->buffer)
-      batch->map = batch->buffer;
-   else {
-      dri_bo_map(batch->buf, GL_TRUE);
-      batch->map = batch->buf->virtual;
-   }
+   batch->map = batch->buffer;
    batch->size = intel->maxBatchSize;
    batch->ptr = batch->map;
+   batch->reserved_space = BATCH_RESERVED;
    batch->dirty_state = ~0;
 }
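
With the non-GEM fallback gone, reset always points the batch at the malloc'd CPU staging buffer and defers the actual upload to flush time. A minimal sketch of intel_batchbuffer_reset() after this patch, reconstructed from the hunk above (the dri_bo_unreference() at the top is inferred from the context lines, not shown verbatim):

   static void
   intel_batchbuffer_reset(struct intel_batchbuffer *batch)
   {
      struct intel_context *intel = batch->intel;

      if (batch->buf != NULL) {
         dri_bo_unreference(batch->buf);   /* drop the previous batch BO */
         batch->buf = NULL;
      }

      /* Fresh BO for the next batch; commands accumulate in batch->buffer
       * (malloc'd once in intel_batchbuffer_alloc()) and are copied into
       * the BO by do_flush_locked() at flush time.
       */
      batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                                intel->maxBatchSize, 4096);
      batch->map = batch->buffer;
      batch->size = intel->maxBatchSize;
      batch->ptr = batch->map;
      batch->reserved_space = BATCH_RESERVED;
      batch->dirty_state = ~0;
   }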
 
@@ -64,6 +57,7 @@ intel_batchbuffer_alloc(struct intel_context *intel)
    struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
 
    batch->intel = intel;
+   batch->buffer = malloc(intel->maxBatchSize);
    intel_batchbuffer_reset(batch);
 
    return batch;
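
The staging buffer now shares its lifetime with the intel_batchbuffer itself: it is malloc'd once here and only released in intel_batchbuffer_free(), while the underlying BO is recycled on every reset. A hedged caller-side sketch, assuming the usual intel_batchbuffer_flush() convenience macro that wraps _intel_batchbuffer_flush() with __FILE__/__LINE__ (emit_some_state() is a hypothetical stand-in for the driver's OUT_BATCH() users):

   struct intel_batchbuffer *batch = intel_batchbuffer_alloc(intel);

   emit_some_state(batch);           /* writes into the malloc'd buffer */
   intel_batchbuffer_flush(batch);   /* dri_bo_subdata() + exec + reset */

   intel_batchbuffer_free(batch);    /* free the staging buffer, unref the BO */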
@@ -72,14 +66,7 @@ intel_batchbuffer_alloc(struct intel_context *intel)
 void
 intel_batchbuffer_free(struct intel_batchbuffer *batch)
 {
-   if (batch->buffer)
-      free (batch->buffer);
-   else {
-      if (batch->map) {
-        dri_bo_unmap(batch->buf);
-        batch->map = NULL;
-      }
-   }
+   free (batch->buffer);
    dri_bo_unreference(batch->buf);
    batch->buf = NULL;
    free(batch);
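
Since the BO is never CPU-mapped any more, there is no unmap bookkeeping to undo: freeing reduces to releasing the staging buffer and the BO reference. The resulting function, as a sketch assembled from this hunk:

   void
   intel_batchbuffer_free(struct intel_batchbuffer *batch)
   {
      free (batch->buffer);            /* always allocated now; free(NULL)
                                        * would also be a harmless no-op */
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
      free(batch);
   }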
@@ -96,12 +83,8 @@ do_flush_locked(struct intel_batchbuffer *batch, GLuint used)
    int ret = 0;
    int x_off = 0, y_off = 0;
 
-   if (batch->buffer)
-      dri_bo_subdata (batch->buf, 0, used, batch->buffer);
-   else
-      dri_bo_unmap(batch->buf);
+   dri_bo_subdata (batch->buf, 0, used, batch->buffer);
 
-   batch->map = NULL;
    batch->ptr = NULL;
 
    if (!intel->no_hw)
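
The upload is now always a dri_bo_subdata() of the bytes actually emitted, replacing the old unmap of a CPU-mapped BO. A sketch of the start of do_flush_locked() after the patch; the submission call guarded by the no_hw check sits below this hunk and is deliberately left out:

   /* Copy the 'used' bytes of commands from the malloc'd staging buffer
    * into the batch BO, then hand the BO to the kernel unless no_hw is set.
    */
   dri_bo_subdata (batch->buf, 0, used, batch->buffer);

   batch->ptr = NULL;

   if (!intel->no_hw) {
      /* ... execbuffer submission of batch->buf (not shown here) ... */
   }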
@@ -180,8 +163,6 @@ _intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
    /* Check that we didn't just wrap our batchbuffer at a bad time. */
    assert(!intel->no_batch_wrap);
 
-   batch->reserved_space = BATCH_RESERVED;
-
    /* TODO: Just pass the relocation list and dma buffer up to the
     * kernel.
     */
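
Re-arming reserved_space moves from the flush path into intel_batchbuffer_reset() (first hunk), which the flush path ends by calling anyway, so every fresh batch starts with room held back for its closing commands. For context, a sketch of the kind of space check this reservation feeds; the arithmetic is assumed from the fields visible in this diff rather than copied from intel_batchbuffer.h:

   /* Hypothetical space check: OUT_BATCH()-style emitters stay out of the
    * reserved tail so MI_BATCH_BUFFER_END (and any flush emitted by
    * _intel_batchbuffer_flush()) always fits.
    */
   static GLuint
   intel_batchbuffer_space(struct intel_batchbuffer *batch)
   {
      return (batch->size - batch->reserved_space) - (batch->ptr - batch->map);
   }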
@@ -275,10 +256,18 @@ intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
 {
    struct intel_context *intel = batch->intel;
 
-   if (intel->gen >= 4) {
+   if (intel->gen >= 6) {
+      BEGIN_BATCH(4);
+      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+      OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
+               PIPE_CONTROL_WRITE_FLUSH |
+               PIPE_CONTROL_NO_WRITE);
+      OUT_BATCH(0); /* write address */
+      OUT_BATCH(0); /* write data */
+      ADVANCE_BATCH();
+   } else if (intel->gen >= 4) {
       BEGIN_BATCH(4);
       OUT_BATCH(_3DSTATE_PIPE_CONTROL |
-               PIPE_CONTROL_INSTRUCTION_FLUSH |
                PIPE_CONTROL_WRITE_FLUSH |
                PIPE_CONTROL_NO_WRITE);
       OUT_BATCH(0); /* write address */
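
The new first branch reflects the Sandybridge (gen6) PIPE_CONTROL encoding: the flush flags leave the command dword and occupy the second dword, ahead of the write-address and immediate-data dwords, while the gen4/5 branch keeps OR-ing its flags into the command dword (and drops PIPE_CONTROL_INSTRUCTION_FLUSH there). A comment-only sketch of the gen6 form emitted above; the dword roles are an interpretation of this hunk, not text copied from the hardware documentation:

   /* gen6+ PIPE_CONTROL as emitted above:
    *   DW0  _3DSTATE_PIPE_CONTROL                  opcode/length only
    *   DW1  PIPE_CONTROL_INSTRUCTION_FLUSH |
    *        PIPE_CONTROL_WRITE_FLUSH |
    *        PIPE_CONTROL_NO_WRITE                  flags moved out of DW0
    *   DW2  0                                      write address (unused here)
    *   DW3  0                                      immediate data (unused here)
    * On gen4/5 the flags stay OR-ed into DW0, as in the second branch.
    */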