diff --git a/src/mesa/drivers/dri/i915/intel_batchbuffer.c b/src/mesa/drivers/dri/i915/intel_batchbuffer.c
index 74c75a3769bf0ad7952ee6a513554137020e1757..e0f14a43063db015e00f175553e564248b52769b 100644
--- a/src/mesa/drivers/dri/i915/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i915/intel_batchbuffer.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  * 
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2006 VMware, Inc.
  * All Rights Reserved.
  * 
  * Permission is hereby granted, free of charge, to any person obtaining a
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  * 
  **************************************************************************/
 
+#include "intel_context.h"
 #include "intel_batchbuffer.h"
-#include "intel_ioctl.h"
-#include "intel_decode.h"
+#include "intel_buffer_objects.h"
 #include "intel_reg.h"
+#include "intel_bufmgr.h"
+#include "intel_buffers.h"
 
-/* Relocations in kernel space:
- *    - pass dma buffer seperately
- *    - memory manager knows how to patch
- *    - pass list of dependent buffers
- *    - pass relocation list
- *
- * Either:
- *    - get back an offset for buffer to fire
- *    - memory manager knows how to fire buffer
- *
- * Really want the buffer to be AGP and pinned.
- *
- */
-
-/* Cliprect fence: The highest fence protecting a dma buffer
- * containing explicit cliprect information.  Like the old drawable
- * lock but irq-driven.  X server must wait for this fence to expire
- * before changing cliprects [and then doing sw rendering?].  For
- * other dma buffers, the scheduler will grab current cliprect info
- * and mix into buffer.  X server must hold the lock while changing
- * cliprects???  Make per-drawable.  Need cliprects in shared memory
- * -- beats storing them with every cmd buffer in the queue.
- *
- * ==> X server must wait for this fence to expire before touching the
- * framebuffer with new cliprects.
- *
- * ==> Cliprect-dependent buffers associated with a
- * cliprect-timestamp.  All of the buffers associated with a timestamp
- * must go to hardware before any buffer with a newer timestamp.
- *
- * ==> Dma should be queued per-drawable for correct X/GL
- * synchronization.  Or can fences be used for this?
- *
- * Applies to: Blit operations, metaops, X server operations -- X
- * server automatically waits on its own dma to complete before
- * modifying cliprects ???
- */
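+/* Forward declaration: intel_batchbuffer_init() below uses this before
+ * its definition.
+ */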
+static void
+intel_batchbuffer_reset(struct intel_context *intel);
 
 void
-intel_batchbuffer_reset(struct intel_batchbuffer *batch)
+intel_batchbuffer_init(struct intel_context *intel)
 {
-   struct intel_context *intel = batch->intel;
-
-   if (batch->buf != NULL) {
-      dri_bo_unreference(batch->buf);
-      batch->buf = NULL;
-   }
+   intel_batchbuffer_reset(intel);
 
-   batch->buf = dri_bo_alloc(intel->intelScreen->bufmgr, "batchbuffer",
-                            intel->intelScreen->maxBatchSize, 4096,
-                            DRM_BO_FLAG_MEM_TT);
-   dri_bo_map(batch->buf, GL_TRUE);
-   batch->map = batch->buf->virtual;
-   batch->size = intel->intelScreen->maxBatchSize;
-   batch->ptr = batch->map;
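+   /* The batch is assembled in this CPU-side buffer and copied into the
+    * BO with drm_intel_bo_subdata() when it is flushed.
+    */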
+   intel->batch.cpu_map = malloc(intel->maxBatchSize);
+   intel->batch.map = intel->batch.cpu_map;
 }
 
-struct intel_batchbuffer *
-intel_batchbuffer_alloc(struct intel_context *intel)
+static void
+intel_batchbuffer_reset(struct intel_context *intel)
 {
-   struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
+   if (intel->batch.last_bo != NULL) {
+      drm_intel_bo_unreference(intel->batch.last_bo);
+      intel->batch.last_bo = NULL;
+   }
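+   /* Hold a reference to the previous batch BO; it is dropped the next
+    * time the batch is reset.
+    */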
+   intel->batch.last_bo = intel->batch.bo;
 
-   batch->intel = intel;
-   batch->last_fence = NULL;
-   intel_batchbuffer_reset(batch);
+   intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
+                                       intel->maxBatchSize, 4096);
 
-   return batch;
+   intel->batch.reserved_space = BATCH_RESERVED;
+   intel->batch.used = 0;
 }
 
 void
-intel_batchbuffer_free(struct intel_batchbuffer *batch)
+intel_batchbuffer_free(struct intel_context *intel)
 {
-   if (batch->last_fence) {
-      dri_fence_wait(batch->last_fence);
-      dri_fence_unreference(batch->last_fence);
-      batch->last_fence = NULL;
-   }
-   if (batch->map) {
-      dri_bo_unmap(batch->buf);
-      batch->map = NULL;
-   }
-   dri_bo_unreference(batch->buf);
-   batch->buf = NULL;
-   free(batch);
+   free(intel->batch.cpu_map);
+   drm_intel_bo_unreference(intel->batch.last_bo);
+   drm_intel_bo_unreference(intel->batch.bo);
 }
 
-
-
-/* TODO: Push this whole function into bufmgr.
- */
 static void
-do_flush_locked(struct intel_batchbuffer *batch,
-               GLuint used,
-               GLboolean ignore_cliprects, GLboolean allow_unlock)
+do_batch_dump(struct intel_context *intel)
 {
-   struct intel_context *intel = batch->intel;
-   void *start;
-   GLuint count;
+   struct drm_intel_decode *decode;
+   struct intel_batchbuffer *batch = &intel->batch;
+   int ret;
 
-   start = dri_process_relocs(batch->buf, &count);
+   decode = drm_intel_decode_context_alloc(intel->intelScreen->deviceID);
+   if (!decode)
+      return;
 
-   batch->map = NULL;
-   batch->ptr = NULL;
-   batch->flags = 0;
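+   /* Prefer decoding the batch contents as the GPU sees them; fall back
+    * to the CPU-side copy if the BO cannot be mapped.
+    */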
+   ret = drm_intel_bo_map(batch->bo, false);
+   if (ret == 0) {
+      drm_intel_decode_set_batch_pointer(decode,
+                                        batch->bo->virtual,
+                                        batch->bo->offset,
+                                        batch->used);
+   } else {
+      fprintf(stderr,
+             "WARNING: failed to map batchbuffer (%s), "
+             "dumping uploaded data instead.\n", strerror(ret));
 
-   /* Throw away non-effective packets.  Won't work once we have
-    * hardware contexts which would preserve statechanges beyond a
-    * single buffer.
-    */
+      drm_intel_decode_set_batch_pointer(decode,
+                                        batch->map,
+                                        batch->bo->offset,
+                                        batch->used);
+   }
 
-   if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
-      if (intel->intelScreen->ttm == GL_TRUE) {
-        intel_exec_ioctl(batch->intel,
-                         used, ignore_cliprects, allow_unlock,
-                         start, count, &batch->last_fence);
-      } else {
-        intel_batch_ioctl(batch->intel,
-                          batch->buf->offset,
-                          used, ignore_cliprects, allow_unlock);
-      }
+   drm_intel_decode(decode);
+
+   drm_intel_decode_context_free(decode);
+
+   if (ret == 0) {
+      drm_intel_bo_unmap(batch->bo);
+
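+      /* Give the chipset-specific backend a chance to dump extra
+       * debug output for the batch.
+       */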
+      if (intel->vtbl.debug_batch != NULL)
+        intel->vtbl.debug_batch(intel);
    }
-      
-   dri_post_submit(batch->buf, &batch->last_fence);
-
-   if (intel->numClipRects == 0 && !ignore_cliprects) {
-      if (allow_unlock) {
-        /* If we are not doing any actual user-visible rendering,
-         * do a sched_yield to keep the app from pegging the cpu while
-         * achieving nothing.
-         */
-         UNLOCK_HARDWARE(intel);
-         sched_yield();
-         LOCK_HARDWARE(intel);
+}
+
+/* TODO: Push this whole function into bufmgr.
+ */
+static int
+do_flush_locked(struct intel_context *intel)
+{
+   struct intel_batchbuffer *batch = &intel->batch;
+   int ret = 0;
+
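+   /* Copy the CPU-assembled batch contents into the BO.  batch->used
+    * counts DWORDs, hence the multiply by 4.
+    */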
+   ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
+
+   if (!intel->intelScreen->no_hw) {
+      if (ret == 0) {
+         if (unlikely(INTEL_DEBUG & DEBUG_AUB) && intel->vtbl.annotate_aub)
+            intel->vtbl.annotate_aub(intel);
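+         /* Execute the batch on the render ring. */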
+         ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
+                                     I915_EXEC_RENDER);
       }
-      intel->vtbl.lost_hardware(intel);
    }
 
-   if (INTEL_DEBUG & DEBUG_BATCH) {
-      //      dri_bo_map(batch->buf, GL_FALSE);
-      //      intel_decode(ptr, used / 4, batch->buf->offset,
-      //                  intel->intelScreen->deviceID);
-      //      dri_bo_unmap(batch->buf);
+   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
+      do_batch_dump(intel);
+
+   if (ret != 0) {
+      fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
+      exit(1);
    }
+   intel->vtbl.new_batch(intel);
+
+   return ret;
 }
 
-void
-intel_batchbuffer_flush(struct intel_batchbuffer *batch)
+int
+_intel_batchbuffer_flush(struct intel_context *intel,
+                        const char *file, int line)
 {
-   struct intel_context *intel = batch->intel;
-   GLuint used = batch->ptr - batch->map;
-   GLboolean was_locked = intel->locked;
+   int ret;
 
-   if (used == 0)
-      return;
+   if (intel->batch.used == 0)
+      return 0;
 
-   /* Add the MI_BATCH_BUFFER_END.  Always add an MI_FLUSH - this is a
-    * performance drain that we would like to avoid.
-    */
-   if (used & 4) {
-      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
-      ((int *) batch->ptr)[1] = 0;
-      ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
-      used += 12;
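+   /* Remember the first batch submitted after a SwapBuffers so it can
+    * be used to throttle the client later.
+    */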
+   if (intel->first_post_swapbuffers_batch == NULL) {
+      intel->first_post_swapbuffers_batch = intel->batch.bo;
+      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
    }
-   else {
-      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
-      ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
-      used += 8;
+
+   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
+      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
+             4*intel->batch.used);
+
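+   /* Release the space reserved for the final batch commands below. */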
+   intel->batch.reserved_space = 0;
+
+   if (intel->vtbl.finish_batch)
+      intel->vtbl.finish_batch(intel);
+
+   /* Mark the end of the buffer. */
+   intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
+   if (intel->batch.used & 1) {
+      /* Pad batchbuffer usage to a multiple of 2 DWORDs. */
+      intel_batchbuffer_emit_dword(intel, MI_NOOP);
    }
 
-   /* TODO: Just pass the relocation list and dma buffer up to the
-    * kernel.
-    */
-   if (!was_locked)
-      LOCK_HARDWARE(intel);
+   intel_upload_finish(intel);
+
+   /* Check that we didn't just wrap our batchbuffer at a bad time. */
+   assert(!intel->no_batch_wrap);
+
+   ret = do_flush_locked(intel);
 
-   do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
-                  GL_FALSE);
-     
-   if (!was_locked)
-      UNLOCK_HARDWARE(intel);
+   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
+      fprintf(stderr, "waiting for idle\n");
+      drm_intel_bo_wait_rendering(intel->batch.bo);
+   }
 
    /* Reset the buffer:
     */
-   intel_batchbuffer_reset(batch);
-}
+   intel_batchbuffer_reset(intel);
 
-void
-intel_batchbuffer_finish(struct intel_batchbuffer *batch)
-{
-   intel_batchbuffer_flush(batch);
-   if (batch->last_fence != NULL)
-      dri_fence_wait(batch->last_fence);
+   return ret;
 }
 
 
 /*  This is the only way buffers get added to the validate list.
  */
-GLboolean
-intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
-                             dri_bo *buffer,
-                             GLuint flags, GLuint delta)
+bool
+intel_batchbuffer_emit_reloc(struct intel_context *intel,
+                             drm_intel_bo *buffer,
+                             uint32_t read_domains, uint32_t write_domain,
+                            uint32_t delta)
 {
-   dri_emit_reloc(batch->buf, flags, delta, batch->ptr - batch->map, buffer);
-   batch->ptr += 4;
+   int ret;
 
-   return GL_TRUE;
+   ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
+                                buffer, delta,
+                                read_domains, write_domain);
+   assert(ret == 0);
+   (void)ret;
+
+   /*
+    * Using the old buffer offset, write in what the right data would be, in case
+    * the buffer doesn't move and we can short-circuit the relocation processing
+    * in the kernel.
+    */
+   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
+
+   return true;
+}
+
+bool
+intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
+                                   drm_intel_bo *buffer,
+                                   uint32_t read_domains,
+                                   uint32_t write_domain,
+                                   uint32_t delta)
+{
+   int ret;
+
+   ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
+                                      buffer, delta,
+                                      read_domains, write_domain);
+   assert(ret == 0);
+   (void)ret;
+
+   /*
+    * Using the old buffer offset, write in what the right data would
+    * be, in case the buffer doesn't move and we can short-circuit the
+    * relocation processing in the kernel.
+    */
+   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
+
+   return true;
 }
 
 void
-intel_batchbuffer_data(struct intel_batchbuffer *batch,
-                       const void *data, GLuint bytes, GLuint flags)
+intel_batchbuffer_data(struct intel_context *intel,
+                       const void *data, GLuint bytes)
 {
    assert((bytes & 3) == 0);
-   intel_batchbuffer_require_space(batch, bytes, flags);
-   __memcpy(batch->ptr, data, bytes);
-   batch->ptr += bytes;
+   intel_batchbuffer_require_space(intel, bytes);
+   memcpy(intel->batch.map + intel->batch.used, data, bytes);
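+   /* batch.used is in DWORDs, so convert the byte count. */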
+   intel->batch.used += bytes >> 2;
+}
+
+/* Emit a pipelined flush, either to flush the render and texture caches
+ * before reading from an FBO-drawn texture, or so that frontbuffer
+ * rendering appears on the screen in DRI1.
+ *
+ * This is also used for the always_flush_cache driconf debug option.
+ */
+void
+intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
+{
+   BEGIN_BATCH(1);
+   OUT_BATCH(MI_FLUSH);
+   ADVANCE_BATCH();
 }