[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_batch.c
index f363a922c1d8d7bb76dd505cce166078e9e910d4..81d034ce822770c8209347db2d0db0e52d2afad4 100644
   *   Keith Whitwell <keith@tungstengraphics.com>
   */
      
-
-
 #include "brw_state.h"
 #include "intel_batchbuffer.h"
 #include "main/imports.h"
+#include "glsl/ralloc.h"
 
-
-
-/* A facility similar to the data caching code above, which aims to
- * prevent identical commands being issued repeatedly.
- */
-GLboolean brw_cached_batch_struct( struct brw_context *brw,
-                                  const void *data,
-                                  GLuint sz )
+static void
+brw_track_state_batch(struct brw_context *brw,
+                     enum state_struct_type type,
+                     uint32_t offset,
+                     int size)
 {
-   struct brw_cached_batch_item *item = brw->cached_batch_items;
-   struct header *newheader = (struct header *)data;
-
-   if (brw->emit_state_always) {
-      intel_batchbuffer_data(&brw->intel, data, sz, false);
-      return GL_TRUE;
-   }
-
-   while (item) {
-      if (item->header->opcode == newheader->opcode) {
-        if (item->sz == sz && memcmp(item->header, newheader, sz) == 0)
-           return GL_FALSE;
-        if (item->sz != sz) {
-           free(item->header);
-           item->header = malloc(sz);
-           item->sz = sz;
-        }
-        goto emit;
-      }
-      item = item->next;
-   }
-
-   assert(!item);
-   item = CALLOC_STRUCT(brw_cached_batch_item);
-   item->header = malloc(sz);
-   item->sz = sz;
-   item->next = brw->cached_batch_items;
-   brw->cached_batch_items = item;
-
- emit:
-   memcpy(item->header, newheader, sz);
-   intel_batchbuffer_data(&brw->intel, data, sz, false);
-   return GL_TRUE;
-}
-
-void brw_clear_batch_cache( struct brw_context *brw )
-{
-   struct brw_cached_batch_item *item = brw->cached_batch_items;
+   struct intel_batchbuffer *batch = &brw->intel.batch;
 
-   while (item) {
-      struct brw_cached_batch_item *next = item->next;
-      free((void *)item->header);
-      free(item);
-      item = next;
+   if (!brw->state_batch_list) {
+      /* Our structs are always aligned to at least 32 bytes, so an
+       * array with one entry per 32 bytes of the batch BO is enough.
+       */
+      brw->state_batch_list = ralloc_size(brw, sizeof(*brw->state_batch_list) *
+                                         batch->bo->size / 32);
    }
 
-   brw->cached_batch_items = NULL;
-}
-
-void brw_destroy_batch_cache( struct brw_context *brw )
-{
-   brw_clear_batch_cache(brw);
+   brw->state_batch_list[brw->state_batch_count].offset = offset;
+   brw->state_batch_list[brw->state_batch_count].size = size;
+   brw->state_batch_list[brw->state_batch_count].type = type;
+   brw->state_batch_count++;
 }
 
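The list that brw_track_state_batch() builds is only useful once something walks it while decoding the batch; that consumer is not part of the hunks shown here. The sketch below is a minimal, hypothetical example of how the recorded entries could be reported, using only the fields introduced above — the function name is chosen for illustration and does not exist in this patch (it also assumes <stdio.h> is available, as it is throughout the driver).

   /* Hypothetical consumer of the tracking list built by
    * brw_track_state_batch(): report where each state struct was
    * placed in the batchbuffer.  Illustrative only.
    */
   static void
   brw_print_state_batch_entries(struct brw_context *brw)
   {
      int i;

      for (i = 0; i < brw->state_batch_count; i++) {
         fprintf(stderr, "state type %d: offset 0x%08x, size %d\n",
                 (int) brw->state_batch_list[i].type,
                 brw->state_batch_list[i].offset,
                 brw->state_batch_list[i].size);
      }
   }
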
 /**
@@ -114,6 +72,7 @@ void brw_destroy_batch_cache( struct brw_context *brw )
  */
 void *
 brw_state_batch(struct brw_context *brw,
+               enum state_struct_type type,
                int size,
                int alignment,
                uint32_t *out_offset)
@@ -136,6 +95,9 @@ brw_state_batch(struct brw_context *brw,
 
    batch->state_batch_offset = offset;
 
+   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
+      brw_track_state_batch(brw, type, offset, size);
+
    *out_offset = offset;
    return batch->map + (offset>>2);
 }
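
With the extra parameter, every caller now has to say what kind of state it is allocating. The fragment below sketches what such a caller might look like; struct brw_cc_unit_state matches existing driver naming, but the enumerator passed as the type is a stand-in, since the actual enum state_struct_type values are defined outside this diff. The tracking itself only happens when DEBUG_BATCH is set in INTEL_DEBUG (typically via the INTEL_DEBUG=batch environment variable), so regular runs pay nothing beyond the unlikely() branch.

   struct brw_cc_unit_state *cc;
   uint32_t cc_offset;

   /* BRW_STATE_TYPE_CC is a placeholder enumerator; the real
    * state_struct_type values live elsewhere in the driver.
    */
   cc = brw_state_batch(brw, BRW_STATE_TYPE_CC,
                        sizeof(*cc), 64, &cc_offset);
   memset(cc, 0, sizeof(*cc));
   /* ... fill in *cc; cc_offset is then emitted as the pointer to the
    * CC unit state within the batchbuffer.
    */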