iris: Initial commit of a new 'iris' driver for Intel Gen8+ GPUs.
author     Kenneth Graunke <kenneth@whitecape.org>
           Fri, 24 Nov 2017 07:15:14 +0000 (23:15 -0800)
committer  Kenneth Graunke <kenneth@whitecape.org>
           Thu, 21 Feb 2019 18:26:04 +0000 (10:26 -0800)
This commit introduces a new Gallium driver for Intel Gen8+ GPUs,
named 'iris_dri.so' after the hardware.

Developed by:
- Kenneth Graunke (overall driver)
- Dave Airlie (shaders, conditional render, overflow query, Gen8 port)
- Chris Wilson (fencing, pinned memory, ...)
- Jordan Justen (compute shaders)
- Jason Ekstrand (image load store)
- Caio Marcelo de Oliveira Filho (tessellation control passthrough)
- Rafael Antognolli (auxiliary buffer fixes)
- The rest of the i965 contributors and the Mesa community

29 files changed:
include/pci_ids/i965_pci_ids.h
meson.build
meson_options.txt
src/gallium/auxiliary/pipe-loader/pipe_loader_drm.c
src/gallium/auxiliary/target-helpers/drm_helper.h
src/gallium/auxiliary/target-helpers/drm_helper_public.h
src/gallium/drivers/iris/iris_batch.c [new file with mode: 0644]
src/gallium/drivers/iris/iris_batch.h [new file with mode: 0644]
src/gallium/drivers/iris/iris_bufmgr.c [new file with mode: 0644]
src/gallium/drivers/iris/iris_bufmgr.h [new file with mode: 0644]
src/gallium/drivers/iris/iris_context.h [new file with mode: 0644]
src/gallium/drivers/iris/iris_draw.c [new file with mode: 0644]
src/gallium/drivers/iris/iris_formats.c [new file with mode: 0644]
src/gallium/drivers/iris/iris_pipe.c [new file with mode: 0644]
src/gallium/drivers/iris/iris_program.c [new file with mode: 0644]
src/gallium/drivers/iris/iris_resource.c [new file with mode: 0644]
src/gallium/drivers/iris/iris_resource.h [new file with mode: 0644]
src/gallium/drivers/iris/iris_screen.c [new file with mode: 0644]
src/gallium/drivers/iris/iris_screen.h [new file with mode: 0644]
src/gallium/drivers/iris/iris_state.c [new file with mode: 0644]
src/gallium/drivers/iris/meson.build [new file with mode: 0644]
src/gallium/meson.build
src/gallium/targets/dri/meson.build
src/gallium/targets/dri/target.c
src/gallium/winsys/iris/drm/iris_drm_public.h [new file with mode: 0644]
src/gallium/winsys/iris/drm/iris_drm_winsys.c [new file with mode: 0644]
src/gallium/winsys/iris/drm/meson.build [new file with mode: 0644]
src/loader/pci_id_driver_map.h
src/meson.build

index b91abd7a3f99e074dca54f2f60a03a78faecf11a..2aa5bceafcac86aa0d04eac878e45c2b205cac8b 100644 (file)
@@ -1,3 +1,4 @@
+#ifndef IRIS
 CHIPSET(0x29A2, i965,    "Intel(R) 965G")
 CHIPSET(0x2992, i965,    "Intel(R) 965Q")
 CHIPSET(0x2982, i965,    "Intel(R) 965G")
@@ -91,6 +92,11 @@ CHIPSET(0x0F32, byt,     "Intel(R) Bay Trail")
 CHIPSET(0x0F33, byt,     "Intel(R) Bay Trail")
 CHIPSET(0x0157, byt,     "Intel(R) Bay Trail")
 CHIPSET(0x0155, byt,     "Intel(R) Bay Trail")
+CHIPSET(0x22B0, chv,     "Intel(R) HD Graphics (Cherrytrail)")
+CHIPSET(0x22B1, chv,     "Intel(R) HD Graphics XXX (Braswell)") /* Overridden in brw_get_renderer_string */
+CHIPSET(0x22B2, chv,     "Intel(R) HD Graphics (Cherryview)")
+CHIPSET(0x22B3, chv,     "Intel(R) HD Graphics (Cherryview)")
+#endif
 CHIPSET(0x1602, bdw_gt1, "Intel(R) Broadwell GT1")
 CHIPSET(0x1606, bdw_gt1, "Intel(R) Broadwell GT1")
 CHIPSET(0x160A, bdw_gt1, "Intel(R) Broadwell GT1")
@@ -109,10 +115,6 @@ CHIPSET(0x162A, bdw_gt3, "Intel(R) Iris Pro P6300 (Broadwell GT3e)")
 CHIPSET(0x162B, bdw_gt3, "Intel(R) Iris 6100 (Broadwell GT3)")
 CHIPSET(0x162D, bdw_gt3, "Intel(R) Broadwell GT3")
 CHIPSET(0x162E, bdw_gt3, "Intel(R) Broadwell GT3")
-CHIPSET(0x22B0, chv,     "Intel(R) HD Graphics (Cherrytrail)")
-CHIPSET(0x22B1, chv,     "Intel(R) HD Graphics XXX (Braswell)") /* Overridden in brw_get_renderer_string */
-CHIPSET(0x22B2, chv,     "Intel(R) HD Graphics (Cherryview)")
-CHIPSET(0x22B3, chv,     "Intel(R) HD Graphics (Cherryview)")
 CHIPSET(0x1902, skl_gt1, "Intel(R) HD Graphics 510 (Skylake GT1)")
 CHIPSET(0x1906, skl_gt1, "Intel(R) HD Graphics 510 (Skylake GT1)")
 CHIPSET(0x190A, skl_gt1, "Intel(R) Skylake GT1")
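
The `#ifndef IRIS` guard is the interesting part of this hunk: i965_pci_ids.h
is an X-macro header, and each consumer decides which IDs it sees by defining
(or not defining) IRIS before including it.  With IRIS defined, everything
above the new `#endif` drops out, i.e. all pre-Gen8 parts plus the
Cherryview/Braswell entries this hunk moves up into the guarded region, so
those devices keep loading i965_dri.so while Gen8+ parts can be claimed by
iris_dri.so.  A rough sketch of the consumer side (hypothetical; the real
consumer is src/loader/pci_id_driver_map.h, whose hunk is not shown here):

    static const int iris_chip_ids[] = {
    #define CHIPSET(chip, family, name) chip,
    #define IRIS
    #include "pci_ids/i965_pci_ids.h"
    #undef IRIS
    #undef CHIPSET
    };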
index fbc02970e68c7b06ce2b6954e16aa4f821dcd794..2d18881fff441d2ef572e886de654dd77008e592 100644 (file)
@@ -157,6 +157,7 @@ with_gallium_v3d = _drivers.contains('v3d')
 with_gallium_panfrost = _drivers.contains('panfrost')
 with_gallium_etnaviv = _drivers.contains('etnaviv')
 with_gallium_tegra = _drivers.contains('tegra')
+with_gallium_iris = _drivers.contains('iris')
 with_gallium_i915 = _drivers.contains('i915')
 with_gallium_svga = _drivers.contains('svga')
 with_gallium_virgl = _drivers.contains('virgl')
index 4983cb6386a3ee772a749443429801881c37453a..a723b5406cfbd90012bdb56201f90ed01605cf20 100644 (file)
@@ -60,7 +60,7 @@ option(
   choices : [
     '', 'auto', 'kmsro', 'radeonsi', 'r300', 'r600', 'nouveau', 'freedreno',
     'swrast', 'v3d', 'vc4', 'etnaviv', 'tegra', 'i915', 'svga', 'virgl',
-    'swr', 'panfrost'
+    'swr', 'panfrost', 'iris'
   ],
   description : 'List of gallium drivers to build. If this is set to auto all drivers applicable to the target OS/architecture will be built'
 )
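
With the two build hooks above in place, the new driver is selected at
configure time like any other Gallium driver, e.g. with something along the
lines of `meson builddir/ -Dgallium-drivers=iris` (illustrative; the exact
invocation depends on the rest of the build configuration).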
index 2ddbb2d125b337a4789083478c17ce7d34482762..17d4a58e3933efa88666e5c93c371d0836dfdfc0 100644 (file)
@@ -71,6 +71,11 @@ static const struct drm_driver_descriptor driver_descriptors[] = {
         .create_screen = pipe_i915_create_screen,
         .configuration = pipe_default_configuration_query,
     },
+    {
+        .driver_name = "iris",
+        .create_screen = pipe_iris_create_screen,
+        .configuration = pipe_default_configuration_query,
+    },
     {
         .driver_name = "nouveau",
         .create_screen = pipe_nouveau_create_screen,
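
The pipe loader selects a descriptor by comparing a driver name derived from
the DRM fd's device (via the PCI-ID tables in pci_id_driver_map.h, which is
how a Gen8+ device ends up asking for "iris") against driver_name, then calls
the matching create_screen hook.  Schematically, the lookup amounts to
something like this illustrative sketch (not code from this patch):

    static const struct drm_driver_descriptor *
    find_descriptor(const char *driver_name)
    {
       for (unsigned i = 0; i < ARRAY_SIZE(driver_descriptors); i++) {
          if (strcmp(driver_descriptors[i].driver_name, driver_name) == 0)
             return &driver_descriptors[i];
       }
       return NULL;
    }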
index 85b026b5264aea47c9d1e7c5f4d5693a924b0c0a..57ca74dfcb0067144186866912c0f5a429d8f580 100644 (file)
@@ -60,6 +60,29 @@ pipe_i915_create_screen(int fd, const struct pipe_screen_config *config)
 
 #endif
 
+#ifdef GALLIUM_IRIS
+#include "iris/drm/iris_drm_public.h"
+
+struct pipe_screen *
+pipe_iris_create_screen(int fd, const struct pipe_screen_config *config)
+{
+   struct pipe_screen *screen;
+
+   screen = iris_drm_screen_create(fd);
+   return screen ? debug_screen_wrap(screen) : NULL;
+}
+
+#else
+
+struct pipe_screen *
+pipe_iris_create_screen(int fd, const struct pipe_screen_config *config)
+{
+   fprintf(stderr, "iris: driver missing\n");
+   return NULL;
+}
+
+#endif
+
 #ifdef GALLIUM_NOUVEAU
 #include "nouveau/drm/nouveau_drm_public.h"
 
index 0108a7f0bbb7ac6c72098666b31a82bd4989f69d..1f36ccb6945ac1fe976b08d5ce40e237b4c8f0f5 100644 (file)
@@ -11,7 +11,7 @@ struct pipe_screen *
 pipe_i915_create_screen(int fd, const struct pipe_screen_config *config);
 
 struct pipe_screen *
-pipe_ilo_create_screen(int fd, const struct pipe_screen_config *config);
+pipe_iris_create_screen(int fd, const struct pipe_screen_config *config);
 
 struct pipe_screen *
 pipe_nouveau_create_screen(int fd, const struct pipe_screen_config *config);
diff --git a/src/gallium/drivers/iris/iris_batch.c b/src/gallium/drivers/iris/iris_batch.c
new file mode 100644 (file)
index 0000000..e63584f
--- /dev/null
@@ -0,0 +1,621 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "iris_batch.h"
+#include "iris_bufmgr.h"
+#include "iris_context.h"
+
+#include "drm-uapi/i915_drm.h"
+
+#include "util/hash_table.h"
+#include "main/macros.h"
+
+#include <errno.h>
+#include <xf86drm.h>
+
+#define FILE_DEBUG_FLAG DEBUG_BUFMGR
+
+/**
+ * Target sizes of the batch and state buffers.  We create the initial
+ * buffers at these sizes, and flush when they're nearly full.  If we
+ * underestimate how close we are to the end, and suddenly need more space
+ * in the middle of a draw, we can grow the buffers, and finish the draw.
+ * At that point, we'll be over our target size, so the next operation
+ * should flush.  Each time we flush the batch, we recreate both buffers
+ * at the original target size, so it doesn't grow without bound.
+ */
+#define BATCH_SZ (20 * 1024)
+#define STATE_SZ (16 * 1024)
+
+/* The kernel assumes batchbuffers are smaller than 256kB. */
+#define MAX_BATCH_SIZE (256 * 1024)
+
+/* 3DSTATE_BINDING_TABLE_POINTERS has a U16 offset from Surface State Base
+ * Address, which means that we can't put binding tables beyond 64kB.  This
+ * effectively limits the maximum statebuffer size to 64kB.
+ */
+#define MAX_STATE_SIZE (64 * 1024)
+
+static unsigned
+iris_batch_used(struct iris_batch *batch)
+{
+   return batch->cmd_map_next - batch->cmd_map;
+}
+
+static unsigned
+iris_state_used(struct iris_batch *batch)
+{
+   return batch->state_map_next - batch->state_map;
+}
+
+static void
+iris_batch_reset(struct iris_batch *batch);
+
+static bool
+uint_key_compare(const void *a, const void *b)
+{
+   return a == b;
+}
+
+static uint32_t
+uint_key_hash(const void *key)
+{
+   return (uintptr_t) key;
+}
+
+static void
+init_reloc_list(struct iris_reloc_list *rlist, int count)
+{
+   rlist->reloc_count = 0;
+   rlist->reloc_array_size = count;
+   rlist->relocs = malloc(rlist->reloc_array_size *
+                          sizeof(struct drm_i915_gem_relocation_entry));
+}
+
+void
+iris_batch_init(struct iris_batch *batch,
+                struct iris_screen *screen,
+                struct pipe_debug_callback *dbg)
+{
+   batch->screen = screen;
+   batch->dbg = dbg;
+
+   init_reloc_list(&batch->batch_relocs, 256);
+   init_reloc_list(&batch->state_relocs, 256);
+
+   batch->exec_count = 0;
+   batch->exec_array_size = 100;
+   batch->exec_bos =
+      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
+   batch->validation_list =
+      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));
+
+   if (unlikely(INTEL_DEBUG)) {
+      batch->state_sizes =
+         _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);
+   }
+
+   iris_batch_reset(batch);
+}
+
+#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
+
+static unsigned
+add_exec_bo(struct iris_batch *batch, struct iris_bo *bo)
+{
+   unsigned index = READ_ONCE(bo->index);
+
+   if (index < batch->exec_count && batch->exec_bos[index] == bo)
+      return index;
+
+   /* May have been shared between multiple active batches */
+   for (index = 0; index < batch->exec_count; index++) {
+      if (batch->exec_bos[index] == bo)
+         return index;
+   }
+
+   iris_bo_reference(bo);
+
+   if (batch->exec_count == batch->exec_array_size) {
+      batch->exec_array_size *= 2;
+      batch->exec_bos =
+         realloc(batch->exec_bos,
+                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
+      batch->validation_list =
+         realloc(batch->validation_list,
+                 batch->exec_array_size * sizeof(batch->validation_list[0]));
+   }
+
+   batch->validation_list[batch->exec_count] =
+      (struct drm_i915_gem_exec_object2) {
+         .handle = bo->gem_handle,
+         .alignment = bo->align,
+         .offset = bo->gtt_offset,
+         .flags = bo->kflags,
+      };
+
+   bo->index = batch->exec_count;
+   batch->exec_bos[batch->exec_count] = bo;
+   batch->aperture_space += bo->size;
+
+   return batch->exec_count++;
+}
+
+static void
+iris_batch_reset(struct iris_batch *batch)
+{
+   struct iris_screen *screen = batch->screen;
+   struct iris_bufmgr *bufmgr = screen->bufmgr;
+
+   if (batch->last_cmd_bo != NULL) {
+      iris_bo_unreference(batch->last_cmd_bo);
+      batch->last_cmd_bo = NULL;
+   }
+   batch->last_cmd_bo = batch->cmd_bo;
+
+   batch->cmd_bo = iris_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
+   batch->cmd_map = iris_bo_map(NULL, batch->cmd_bo, MAP_READ | MAP_WRITE);
+   batch->cmd_map_next = batch->cmd_map;
+
+   batch->state_bo = iris_bo_alloc(bufmgr, "statebuffer", STATE_SZ, 4096);
+   batch->state_bo->kflags = EXEC_OBJECT_CAPTURE;
+   batch->state_map =
+      iris_bo_map(NULL, batch->state_bo, MAP_READ | MAP_WRITE);
+
+   /* Avoid making 0 a valid state offset - otherwise the decoder will try
+    * to decode data when we use offset 0 as a null pointer.
+    */
+   batch->state_map_next = batch->state_map + 1;
+
+   add_exec_bo(batch, batch->cmd_bo);
+   assert(batch->cmd_bo->index == 0);
+
+   if (batch->state_sizes)
+      _mesa_hash_table_clear(batch->state_sizes, NULL);
+}
+
+static void
+iris_batch_reset_and_clear_render_cache(struct iris_batch *batch)
+{
+   iris_batch_reset(batch);
+   // XXX: iris_render_cache_set_clear(batch);
+}
+
+void
+iris_batch_free(struct iris_batch *batch)
+{
+   for (int i = 0; i < batch->exec_count; i++) {
+      iris_bo_unreference(batch->exec_bos[i]);
+   }
+   free(batch->batch_relocs.relocs);
+   free(batch->state_relocs.relocs);
+   free(batch->exec_bos);
+   free(batch->validation_list);
+
+   iris_bo_unreference(batch->cmd_bo);
+   iris_bo_unreference(batch->state_bo);
+   iris_bo_unreference(batch->last_cmd_bo);
+   if (batch->state_sizes)
+      _mesa_hash_table_destroy(batch->state_sizes, NULL);
+}
+
+/**
+ * Grow either the batch or state buffer to a new larger size.
+ *
+ * We can't actually grow buffers, so we allocate a new one, copy over
+ * the existing contents, and update our lists to refer to the new one.
+ *
+ * Note that this is only temporary - each new batch recreates the buffers
+ * at their original target size (BATCH_SZ or STATE_SZ).
+ */
+static void
+grow_buffer(struct iris_batch *batch,
+            struct iris_bo **bo_ptr,
+            void **map_ptr,
+            void **map_next_ptr,
+            unsigned new_size)
+{
+   struct iris_bufmgr *bufmgr = batch->screen->bufmgr;
+
+   const unsigned existing_bytes = *map_next_ptr - *map_ptr;
+
+   void *old_map = *map_ptr;
+   struct iris_bo *old_bo = *bo_ptr;
+
+   struct iris_bo *new_bo = iris_bo_alloc(bufmgr, old_bo->name, new_size, 4096);
+
+   perf_debug(batch->dbg, "Growing %s - ran out of space\n", old_bo->name);
+
+   void *new_map = iris_bo_map(NULL, new_bo, MAP_READ | MAP_WRITE);
+   memcpy(new_map, old_map, existing_bytes);
+
+   /* Try to put the new BO at the same GTT offset as the old BO (which
+    * we're throwing away, so it doesn't need to be there).
+    *
+    * This guarantees that our relocations continue to work: values we've
+    * already written into the buffer, values we're going to write into the
+    * buffer, and the validation/relocation lists all will match.
+    */
+   new_bo->gtt_offset = old_bo->gtt_offset;
+   new_bo->index = old_bo->index;
+
+   /* Batch/state buffers are per-context, and if we've run out of space,
+    * we must have actually used them before, so...they will be in the list.
+    */
+   assert(old_bo->index < batch->exec_count);
+   assert(batch->exec_bos[old_bo->index] == old_bo);
+
+   /* Update the validation list to use the new BO. */
+   batch->exec_bos[old_bo->index] = new_bo;
+   batch->validation_list[old_bo->index].handle = new_bo->gem_handle;
+   iris_bo_reference(new_bo);
+   iris_bo_unreference(old_bo);
+
+   /* Drop the *bo_ptr reference.  This should free the old BO. */
+   iris_bo_unreference(old_bo);
+
+   *bo_ptr = new_bo;
+   *map_ptr = new_map;
+   *map_next_ptr = new_map + existing_bytes;
+}
+
+void
+iris_require_command_space(struct iris_batch *batch, unsigned size)
+{
+   if (iris_batch_used(batch) + size >= BATCH_SZ) {
+      if (!batch->no_wrap) {
+         iris_batch_flush(batch);
+      } else {
+         const unsigned new_size =
+            MIN2(batch->cmd_bo->size + batch->cmd_bo->size / 2,
+                 MAX_BATCH_SIZE);
+         grow_buffer(batch, &batch->cmd_bo, &batch->cmd_map,
+                     &batch->cmd_map_next, new_size);
+         assert(iris_batch_used(batch) + size < batch->cmd_bo->size);
+      }
+   }
+}
+
+void
+iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
+{
+   iris_require_command_space(batch, size);
+   memcpy(batch->cmd_map_next, data, size);
+   batch->cmd_map_next += size;
+}
+
+/**
+ * Called when starting a new batch buffer.
+ */
+static void
+iris_new_batch(struct iris_batch *batch)
+{
+   /* Unreference any BOs held by the previous batch, and reset counts. */
+   for (int i = 0; i < batch->exec_count; i++) {
+      iris_bo_unreference(batch->exec_bos[i]);
+      batch->exec_bos[i] = NULL;
+   }
+   batch->batch_relocs.reloc_count = 0;
+   batch->state_relocs.reloc_count = 0;
+   batch->exec_count = 0;
+   batch->aperture_space = 0;
+
+   iris_bo_unreference(batch->state_bo);
+
+   /* Create a new batchbuffer and reset the associated state: */
+   iris_batch_reset_and_clear_render_cache(batch);
+}
+
+/**
+ * Called from iris_batch_flush before emitting MI_BATCHBUFFER_END and
+ * sending it off.
+ *
+ * This function can emit state (say, to preserve registers that aren't saved
+ * between batches).
+ */
+static void
+iris_finish_batch(struct iris_batch *batch)
+{
+   batch->no_wrap = true;
+
+   /* Mark the end of the buffer. */
+   const uint32_t MI_BATCH_BUFFER_END = (0xA << 23);
+   iris_batch_emit(batch, &MI_BATCH_BUFFER_END, sizeof(uint32_t));
+
+   batch->no_wrap = false;
+}
+
+static int
+submit_batch(struct iris_batch *batch, int in_fence_fd, int *out_fence_fd)
+{
+   iris_bo_unmap(batch->cmd_bo);
+   iris_bo_unmap(batch->state_bo);
+
+   /* The requirements for using I915_EXEC_NO_RELOC are:
+    *
+    *   The addresses written in the objects must match the corresponding
+    *   reloc.presumed_offset which in turn must match the corresponding
+    *   execobject.offset.
+    *
+    *   Any render targets written to in the batch must be flagged with
+    *   EXEC_OBJECT_WRITE.
+    *
+    *   To avoid stalling, execobject.offset should match the current
+    *   address of that object within the active context.
+    */
+   int flags = I915_EXEC_NO_RELOC |
+               I915_EXEC_BATCH_FIRST |
+               I915_EXEC_HANDLE_LUT |
+               I915_EXEC_RENDER;
+
+   /* Set statebuffer relocations */
+   const unsigned state_index = batch->state_bo->index;
+   if (state_index < batch->exec_count &&
+       batch->exec_bos[state_index] == batch->state_bo) {
+      struct drm_i915_gem_exec_object2 *entry =
+         &batch->validation_list[state_index];
+      assert(entry->handle == batch->state_bo->gem_handle);
+      entry->relocation_count = batch->state_relocs.reloc_count;
+      entry->relocs_ptr = (uintptr_t) batch->state_relocs.relocs;
+   }
+
+   /* Set batchbuffer relocations */
+   struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[0];
+   assert(entry->handle == batch->cmd_bo->gem_handle);
+   entry->relocation_count = batch->batch_relocs.reloc_count;
+   entry->relocs_ptr = (uintptr_t) batch->batch_relocs.relocs;
+
+   struct drm_i915_gem_execbuffer2 execbuf = {
+      .buffers_ptr = (uintptr_t) batch->validation_list,
+      .buffer_count = batch->exec_count,
+      .batch_start_offset = 0,
+      .batch_len = iris_batch_used(batch),
+      .flags = flags,
+      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
+   };
+
+   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;
+
+   if (in_fence_fd != -1) {
+      execbuf.rsvd2 = in_fence_fd;
+      execbuf.flags |= I915_EXEC_FENCE_IN;
+   }
+
+   if (out_fence_fd != NULL) {
+      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
+      *out_fence_fd = -1;
+      execbuf.flags |= I915_EXEC_FENCE_OUT;
+   }
+
+   int ret = drm_ioctl(batch->screen->fd, cmd, &execbuf);
+   if (ret != 0)
+      ret = -errno;
+
+   for (int i = 0; i < batch->exec_count; i++) {
+      struct iris_bo *bo = batch->exec_bos[i];
+
+      bo->idle = false;
+      bo->index = -1;
+
+      /* Update iris_bo::gtt_offset */
+      if (batch->validation_list[i].offset != bo->gtt_offset) {
+         DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
+             bo->gem_handle, bo->gtt_offset,
+             batch->validation_list[i].offset);
+         bo->gtt_offset = batch->validation_list[i].offset;
+      }
+   }
+
+   if (ret == 0 && out_fence_fd != NULL)
+      *out_fence_fd = execbuf.rsvd2 >> 32;
+
+   return ret;
+}
+
+/**
+ * The in_fence_fd is ignored if -1.  Otherwise this function takes ownership
+ * of the fd.
+ *
+ * The out_fence_fd is ignored if NULL. Otherwise, the caller takes ownership
+ * of the returned fd.
+ */
+int
+_iris_batch_flush_fence(struct iris_batch *batch,
+                        int in_fence_fd, int *out_fence_fd,
+                        const char *file, int line)
+{
+   if (iris_batch_used(batch) == 0)
+      return 0;
+
+   /* Check that we didn't just wrap our batchbuffer at a bad time. */
+   assert(!batch->no_wrap);
+
+   iris_finish_batch(batch);
+
+   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
+      int bytes_for_commands = iris_batch_used(batch);
+      int bytes_for_state = iris_state_used(batch);
+      fprintf(stderr, "%19s:%-3d: Batchbuffer flush with %5db (%0.1f%%) (pkt),"
+              " %5db (%0.1f%%) (state), %4d BOs (%0.1fMb aperture),"
+              " %4d batch relocs, %4d state relocs\n", file, line,
+              bytes_for_commands, 100.0f * bytes_for_commands / BATCH_SZ,
+              bytes_for_state, 100.0f * bytes_for_state / STATE_SZ,
+              batch->exec_count,
+              (float) batch->aperture_space / (1024 * 1024),
+              batch->batch_relocs.reloc_count,
+              batch->state_relocs.reloc_count);
+   }
+
+   int ret = submit_batch(batch, in_fence_fd, out_fence_fd);
+   if (ret < 0)
+      return ret;
+
+   //throttle(brw);
+
+   //if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
+      //do_batch_dump(brw);
+
+   //if (brw->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
+      //iris_check_for_reset(ice);
+
+   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
+      dbg_printf("waiting for idle\n");
+      iris_bo_wait_rendering(batch->cmd_bo);
+   }
+
+   /* Start a new batch buffer. */
+   iris_new_batch(batch);
+
+   return 0;
+}
+
+bool
+iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
+{
+   unsigned index = READ_ONCE(bo->index);
+   if (index < batch->exec_count && batch->exec_bos[index] == bo)
+      return true;
+
+   for (int i = 0; i < batch->exec_count; i++) {
+      if (batch->exec_bos[i] == bo)
+         return true;
+   }
+   return false;
+}
+
+/* This is the only way buffers get added to the validation list.
+ */
+static uint64_t
+emit_reloc(struct iris_batch *batch,
+           struct iris_reloc_list *rlist, uint32_t offset,
+           struct iris_bo *target, uint32_t target_offset,
+           unsigned int reloc_flags)
+{
+   assert(target != NULL);
+
+   if (rlist->reloc_count == rlist->reloc_array_size) {
+      rlist->reloc_array_size *= 2;
+      rlist->relocs = realloc(rlist->relocs,
+                              rlist->reloc_array_size *
+                              sizeof(struct drm_i915_gem_relocation_entry));
+   }
+
+   unsigned int index = add_exec_bo(batch, target);
+   struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
+
+   rlist->relocs[rlist->reloc_count++] =
+      (struct drm_i915_gem_relocation_entry) {
+         .offset = offset,
+         .delta = target_offset,
+         .target_handle = index,
+         .presumed_offset = entry->offset,
+      };
+
+   /* Using the old buffer offset, write in what the right data would be, in
+    * case the buffer doesn't move and we can short-circuit the relocation
+    * processing in the kernel.
+    */
+   return entry->offset + target_offset;
+}
+
+uint64_t
+iris_batch_reloc(struct iris_batch *batch, uint32_t batch_offset,
+                 struct iris_bo *target, uint32_t target_offset,
+                 unsigned int reloc_flags)
+{
+   assert(batch_offset <= batch->cmd_bo->size - sizeof(uint32_t));
+
+   return emit_reloc(batch, &batch->batch_relocs, batch_offset,
+                     target, target_offset, reloc_flags);
+}
+
+uint64_t
+iris_state_reloc(struct iris_batch *batch, uint32_t state_offset,
+                 struct iris_bo *target, uint32_t target_offset,
+                 unsigned int reloc_flags)
+{
+   assert(state_offset <= batch->state_bo->size - sizeof(uint32_t));
+
+   return emit_reloc(batch, &batch->state_relocs, state_offset,
+                     target, target_offset, reloc_flags);
+}
+
+
+static uint32_t
+iris_state_entry_size(struct iris_batch *batch, uint32_t offset)
+{
+   struct hash_entry *entry =
+      _mesa_hash_table_search(batch->state_sizes, (void *)(uintptr_t) offset);
+   return entry ? (uintptr_t) entry->data : 0;
+}
+
+/**
+ * Reserve some space in the statebuffer, or flush.
+ *
+ * This is used to estimate when we're near the end of the batch,
+ * so we can flush early.
+ */
+void
+iris_require_state_space(struct iris_batch *batch, unsigned size)
+{
+   if (iris_state_used(batch) + size >= STATE_SZ)
+      iris_batch_flush(batch);
+}
+
+/**
+ * Allocates a block of space in the batchbuffer for indirect state.
+ */
+void *
+iris_alloc_state(struct iris_batch *batch,
+                 int size, int alignment,
+                 uint32_t *out_offset)
+{
+   assert(size < batch->state_bo->size);
+
+   if (ALIGN(iris_state_used(batch), alignment) + size >= STATE_SZ) {
+      if (!batch->no_wrap) {
+         iris_batch_flush(batch);
+      } else {
+         const unsigned new_size =
+            MIN2(batch->state_bo->size + batch->state_bo->size / 2,
+                 MAX_STATE_SIZE);
+         grow_buffer(batch, &batch->state_bo, &batch->state_map,
+                     &batch->state_map_next, new_size);
+      }
+   }
+
+   unsigned offset = ALIGN(iris_state_used(batch), alignment);
+   assert(offset + size < batch->state_bo->size);
+
+   if (unlikely(batch->state_sizes)) {
+      _mesa_hash_table_insert(batch->state_sizes,
+                              (void *) (uintptr_t) offset,
+                              (void *) (uintptr_t) size);
+   }
+
+   batch->state_map_next = batch->state_map + offset + size;
+
+   *out_offset = offset;
+   return batch->state_map + offset;
+}
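
The relocation helpers above return the presumed GPU address so the caller
can write it into the batch immediately, which is what lets the kernel
short-circuit relocation processing when a buffer has not moved (the
I915_EXEC_NO_RELOC contract described in submit_batch).  A hypothetical
caller, sketching the intended pattern (real packet emission lives in
iris_state.c, whose hunk is not shown in this excerpt):

    /* Patch a 64-bit buffer address into the batch at byte offset
     * addr_offset, adding `target` to the relocation and validation
     * lists as a side effect.
     */
    static void
    example_patch_address(struct iris_batch *batch, uint32_t addr_offset,
                          struct iris_bo *target)
    {
       uint64_t presumed = iris_batch_reloc(batch, addr_offset,
                                            target, 0, RELOC_WRITE);
       memcpy(batch->cmd_map + addr_offset, &presumed, sizeof(presumed));
    }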
diff --git a/src/gallium/drivers/iris/iris_batch.h b/src/gallium/drivers/iris/iris_batch.h
new file mode 100644 (file)
index 0000000..4c68153
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef IRIS_BATCH_DOT_H
+#define IRIS_BATCH_DOT_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+struct iris_address {
+   struct iris_bo *bo;
+   unsigned reloc_flags;
+   uint32_t offset;
+};
+
+struct iris_reloc_list {
+   struct drm_i915_gem_relocation_entry *relocs;
+   int reloc_count;
+   int reloc_array_size;
+};
+
+struct iris_batch {
+   struct iris_screen *screen;
+   struct pipe_debug_callback *dbg;
+
+   /** Current batchbuffer being queued up. */
+   struct iris_bo *cmd_bo;
+   /** Current statebuffer being queued up. */
+   struct iris_bo *state_bo;
+
+   /** Last BO submitted to the hardware.  Used for glFinish(). */
+   struct iris_bo *last_cmd_bo;
+
+   uint32_t hw_ctx_id;
+
+   void *cmd_map_next;
+   void *cmd_map;
+   void *state_map;
+   void *state_map_next;
+
+   bool no_wrap;
+
+   struct iris_reloc_list batch_relocs;
+   struct iris_reloc_list state_relocs;
+
+   /** The validation list */
+   struct drm_i915_gem_exec_object2 *validation_list;
+   struct iris_bo **exec_bos;
+   int exec_count;
+   int exec_array_size;
+
+   /** The amount of aperture space (in bytes) used by all exec_bos */
+   int aperture_space;
+
+   /** Map from batch offset to iris_alloc_state data (with DEBUG_BATCH) */
+   struct hash_table *state_sizes;
+};
+
+void iris_batch_init(struct iris_batch *batch,
+                     struct iris_screen *screen,
+                     struct pipe_debug_callback *dbg);
+void iris_batch_free(struct iris_batch *batch);
+void iris_require_command_space(struct iris_batch *batch, unsigned size);
+void iris_require_state_space(struct iris_batch *batch, unsigned size);
+void iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size);
+void *iris_alloc_state(struct iris_batch *batch, int size, int alignment,
+                       uint32_t *out_offset);
+
+int _iris_batch_flush_fence(struct iris_batch *batch,
+                            int in_fence_fd, int *out_fence_fd,
+                            const char *file, int line);
+
+
+#define iris_batch_flush_fence(batch, in_fence_fd, out_fence_fd) \
+   _iris_batch_flush_fence((batch), (in_fence_fd), (out_fence_fd), \
+                           __FILE__, __LINE__)
+
+#define iris_batch_flush(batch) iris_batch_flush_fence((batch), -1, NULL)
+
+bool iris_batch_references(struct iris_batch *batch, struct iris_bo *bo);
+
+#define RELOC_WRITE EXEC_OBJECT_WRITE
+
+uint64_t iris_batch_reloc(struct iris_batch *batch,
+                          uint32_t batch_offset,
+                          struct iris_bo *target,
+                          uint32_t target_offset,
+                          unsigned flags);
+
+uint64_t iris_state_reloc(struct iris_batch *batch,
+                          uint32_t batch_offset,
+                          struct iris_bo *target,
+                          uint32_t target_offset,
+                          unsigned flags);
+#endif
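
Taken together, a hypothetical user of this header might look like the
following sketch (assuming <string.h> and <unistd.h> are available; none of
this code is part of the patch):

    static void
    example_batch_use(struct iris_batch *batch)
    {
       /* Allocate 64 bytes of 32-byte-aligned indirect state; the returned
        * offset is relative to the start of the statebuffer.
        */
       uint32_t offset;
       void *state = iris_alloc_state(batch, 64, 32, &offset);
       memset(state, 0, 64);

       /* Submit the batch, requesting a sync-file fence; on success the
        * caller owns the returned fd.
        */
       int fence_fd = -1;
       if (iris_batch_flush_fence(batch, -1, &fence_fd) == 0 && fence_fd != -1)
          close(fence_fd);
    }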
diff --git a/src/gallium/drivers/iris/iris_bufmgr.c b/src/gallium/drivers/iris/iris_bufmgr.c
new file mode 100644 (file)
index 0000000..ce1066b
--- /dev/null
@@ -0,0 +1,1322 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <xf86drm.h>
+#include <util/u_atomic.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <stdbool.h>
+#include <time.h>
+
+#include "errno.h"
+#ifndef ETIME
+#define ETIME ETIMEDOUT
+#endif
+#include "common/gen_clflush.h"
+#include "common/gen_debug.h"
+#include "dev/gen_device_info.h"
+#include "main/macros.h"
+#include "util/macros.h"
+#include "util/hash_table.h"
+#include "util/list.h"
+#include "iris_bufmgr.h"
+#include "iris_context.h"
+#include "string.h"
+
+#include "drm-uapi/i915_drm.h"
+
+#ifdef HAVE_VALGRIND
+#include <valgrind.h>
+#include <memcheck.h>
+#define VG(x) x
+#else
+#define VG(x)
+#endif
+
+/* VALGRIND_FREELIKE_BLOCK unfortunately does not actually undo the earlier
+ * VALGRIND_MALLOCLIKE_BLOCK but instead leaves vg convinced the memory is
+ * leaked. All because it does not call VG(cli_free) from its
+ * VG_USERREQ__FREELIKE_BLOCK handler. Instead of treating the memory like
+ * an allocation, we mark it available for use upon mmapping and remove
+ * it upon unmapping.
+ */
+#define VG_DEFINED(ptr, size) VG(VALGRIND_MAKE_MEM_DEFINED(ptr, size))
+#define VG_NOACCESS(ptr, size) VG(VALGRIND_MAKE_MEM_NOACCESS(ptr, size))
+
+#define PAGE_SIZE 4096
+
+#define FILE_DEBUG_FLAG DEBUG_BUFMGR
+
+/**
+ * Call ioctl, restarting if it is interrupted
+ */
+int
+drm_ioctl(int fd, unsigned long request, void *arg)
+{
+    int ret;
+
+    do {
+        ret = ioctl(fd, request, arg);
+    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+    return ret;
+}
+
+
+
+static inline int
+atomic_add_unless(int *v, int add, int unless)
+{
+   int c, old;
+   c = p_atomic_read(v);
+   while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
+      c = old;
+   return c == unless;
+}
+
+struct bo_cache_bucket {
+   struct list_head head;
+   uint64_t size;
+};
+
+struct iris_bufmgr {
+   int fd;
+
+   mtx_t lock;
+
+   /** Array of lists of cached gem objects of power-of-two sizes */
+   struct bo_cache_bucket cache_bucket[14 * 4];
+   int num_buckets;
+   time_t time;
+
+   struct hash_table *name_table;
+   struct hash_table *handle_table;
+
+   bool has_llc:1;
+   bool bo_reuse:1;
+};
+
+static int bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
+                                  uint32_t stride);
+
+static void bo_free(struct iris_bo *bo);
+
+static uint32_t
+key_hash_uint(const void *key)
+{
+   return _mesa_hash_data(key, 4);
+}
+
+static bool
+key_uint_equal(const void *a, const void *b)
+{
+   return *((unsigned *) a) == *((unsigned *) b);
+}
+
+static struct iris_bo *
+hash_find_bo(struct hash_table *ht, unsigned int key)
+{
+   struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
+   return entry ? (struct iris_bo *) entry->data : NULL;
+}
+
+/**
+ * This function finds the correct bucket for the input size.
+ * It computes the bucket index directly, in O(1) time, rather than
+ * iterating through all the buckets comparing sizes.
+ */
+static struct bo_cache_bucket *
+bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size)
+{
+   /* Compute the number of pages, rounding the size up to a full page. */
+   const unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+
+   /* Row  Bucket sizes    clz((x-1) | 3)   Row    Column
+    *        in pages                      stride   size
+    *   0:   1  2  3  4 -> 30 30 30 30        4       1
+    *   1:   5  6  7  8 -> 29 29 29 29        4       1
+    *   2:  10 12 14 16 -> 28 28 28 28        8       2
+    *   3:  20 24 28 32 -> 27 27 27 27       16       4
+    */
+   const unsigned row = 30 - __builtin_clz((pages - 1) | 3);
+   const unsigned row_max_pages = 4 << row;
+
+   /* The '& ~2' is the special case for row 1. In row 1, max pages /
+    * 2 is 2, but the previous row maximum is zero (because there is
+    * no previous row). All row maximum sizes are power of 2, so that
+    * is the only case where that bit will be set.
+    */
+   const unsigned prev_row_max_pages = (row_max_pages / 2) & ~2;
+   int col_size_log2 = row - 1;
+   col_size_log2 += (col_size_log2 < 0);
+
+   const unsigned col = (pages - prev_row_max_pages +
+                        ((1 << col_size_log2) - 1)) >> col_size_log2;
+
+   /* Calculating the index based on the row and column. */
+   const unsigned index = (row * 4) + (col - 1);
+
+   return (index < bufmgr->num_buckets) ?
+          &bufmgr->cache_bucket[index] : NULL;
+}
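
Tracing a concrete request through the math above makes the bucketing easier
to follow.  For a 30-page request (120 kB with 4 kB pages):

    pages = 30
    row   = 30 - clz((30 - 1) | 3) = 30 - clz(31) = 30 - 27 = 3
    prev_row_max_pages = (32 / 2) & ~2 = 16
    col   = (30 - 16 + 3) >> 2 = 4
    index = 3 * 4 + (4 - 1) = 15

so the request lands in the row-3, column-4 bucket, the 32-page (128 kB)
size class from the table in the comment.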
+
+int
+iris_bo_busy(struct iris_bo *bo)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+   struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };
+
+   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+   if (ret == 0) {
+      bo->idle = !busy.busy;
+      return busy.busy;
+   }
+   return false;
+}
+
+int
+iris_bo_madvise(struct iris_bo *bo, int state)
+{
+   struct drm_i915_gem_madvise madv = {
+      .handle = bo->gem_handle,
+      .madv = state,
+      .retained = 1,
+   };
+
+   drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+
+   return madv.retained;
+}
+
+/* drop the oldest entries that have been purged by the kernel */
+static void
+iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
+                          struct bo_cache_bucket *bucket)
+{
+   list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
+      if (iris_bo_madvise(bo, I915_MADV_DONTNEED))
+         break;
+
+      list_del(&bo->head);
+      bo_free(bo);
+   }
+}
+
+static struct iris_bo *
+bo_alloc_internal(struct iris_bufmgr *bufmgr,
+                  const char *name,
+                  uint64_t size,
+                  unsigned flags,
+                  uint32_t tiling_mode,
+                  uint32_t stride, uint64_t alignment)
+{
+   struct iris_bo *bo;
+   unsigned int page_size = getpagesize();
+   int ret;
+   struct bo_cache_bucket *bucket;
+   bool alloc_from_cache;
+   uint64_t bo_size;
+   bool busy = false;
+   bool zeroed = false;
+
+   if (flags & BO_ALLOC_BUSY)
+      busy = true;
+
+   if (flags & BO_ALLOC_ZEROED)
+      zeroed = true;
+
+   /* BUSY doesn't really mix with ZEROED, as we have to wait for the buffer
+    * to be idle before we can memset it.  Just disallow that combination.
+    */
+   assert(!(busy && zeroed));
+
+   /* Round the allocated size up to a power of two number of pages. */
+   bucket = bucket_for_size(bufmgr, size);
+
+   /* If we don't have caching at this size, don't actually round the
+    * allocation up.
+    */
+   if (bucket == NULL) {
+      bo_size = size;
+      if (bo_size < page_size)
+         bo_size = page_size;
+   } else {
+      bo_size = bucket->size;
+   }
+
+   mtx_lock(&bufmgr->lock);
+   /* Get a buffer out of the cache if available */
+retry:
+   alloc_from_cache = false;
+   if (bucket != NULL && !list_empty(&bucket->head)) {
+      if (busy && !zeroed) {
+         /* Allocate new render-target BOs from the tail (MRU)
+          * of the list, as it will likely be hot in the GPU
+          * cache and in the aperture for us.  If the caller
+          * asked us to zero the buffer, we don't want this
+          * because we are going to mmap it.
+          */
+         bo = LIST_ENTRY(struct iris_bo, bucket->head.prev, head);
+         list_del(&bo->head);
+         alloc_from_cache = true;
+         bo->align = alignment;
+      } else {
+         assert(alignment == 0);
+         /* For non-render-target BOs (where we're probably
+          * going to map it first thing in order to fill it
+          * with data), check if the last BO in the cache is
+          * unbusy, and only reuse in that case. Otherwise,
+          * allocating a new buffer is probably faster than
+          * waiting for the GPU to finish.
+          */
+         bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
+         if (!iris_bo_busy(bo)) {
+            alloc_from_cache = true;
+            list_del(&bo->head);
+         }
+      }
+
+      if (alloc_from_cache) {
+         if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
+            bo_free(bo);
+            iris_bo_cache_purge_bucket(bufmgr, bucket);
+            goto retry;
+         }
+
+         if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
+            bo_free(bo);
+            goto retry;
+         }
+
+         if (zeroed) {
+            void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
+            if (!map) {
+               bo_free(bo);
+               goto retry;
+            }
+            memset(map, 0, bo_size);
+         }
+      }
+   }
+
+   if (!alloc_from_cache) {
+      bo = calloc(1, sizeof(*bo));
+      if (!bo)
+         goto err;
+
+      bo->size = bo_size;
+      bo->idle = true;
+
+      struct drm_i915_gem_create create = { .size = bo_size };
+
+      /* All new BOs we get from the kernel are zeroed, so we don't need to
+       * worry about that here.
+       */
+      ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+      if (ret != 0) {
+         free(bo);
+         goto err;
+      }
+
+      bo->gem_handle = create.handle;
+
+      bo->bufmgr = bufmgr;
+      bo->align = alignment;
+
+      bo->tiling_mode = I915_TILING_NONE;
+      bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+      bo->stride = 0;
+
+      if (bo_set_tiling_internal(bo, tiling_mode, stride))
+         goto err_free;
+
+      /* Calling set_domain() will allocate pages for the BO outside of the
+       * struct mutex lock in the kernel, which is more efficient than waiting
+       * to create them during the first execbuf that uses the BO.
+       */
+      struct drm_i915_gem_set_domain sd = {
+         .handle = bo->gem_handle,
+         .read_domains = I915_GEM_DOMAIN_CPU,
+         .write_domain = 0,
+      };
+
+      if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
+         goto err_free;
+   }
+
+   bo->name = name;
+   p_atomic_set(&bo->refcount, 1);
+   bo->reusable = true;
+   bo->cache_coherent = bufmgr->has_llc;
+   bo->index = -1;
+
+   mtx_unlock(&bufmgr->lock);
+
+   DBG("bo_create: buf %d (%s) %llub\n", bo->gem_handle, bo->name,
+       (unsigned long long) size);
+
+   return bo;
+
+err_free:
+   bo_free(bo);
+err:
+   mtx_unlock(&bufmgr->lock);
+   return NULL;
+}
+
+struct iris_bo *
+iris_bo_alloc(struct iris_bufmgr *bufmgr,
+             const char *name, uint64_t size, uint64_t alignment)
+{
+   return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
+}
+
+struct iris_bo *
+iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
+                   uint64_t size, uint32_t tiling_mode, uint32_t pitch,
+                   unsigned flags)
+{
+   return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch, 0);
+}
+
+/**
+ * Returns a iris_bo wrapping the given buffer object handle.
+ *
+ * This can be used when one application needs to pass a buffer object
+ * to another.
+ */
+struct iris_bo *
+iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
+                            const char *name, unsigned int handle)
+{
+   struct iris_bo *bo;
+
+   /* At the moment most applications only have a few named BOs.
+    * For instance, in a DRI client only the render buffers passed
+    * between X and the client are named. And since X returns the
+    * alternating names for the front/back buffer a linear search
+    * provides a sufficiently fast match.
+    */
+   mtx_lock(&bufmgr->lock);
+   bo = hash_find_bo(bufmgr->name_table, handle);
+   if (bo) {
+      iris_bo_reference(bo);
+      goto out;
+   }
+
+   struct drm_gem_open open_arg = { .name = handle };
+   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+   if (ret != 0) {
+      DBG("Couldn't reference %s handle 0x%08x: %s\n",
+          name, handle, strerror(errno));
+      bo = NULL;
+      goto out;
+   }
+   /* Now see if someone has used a prime handle to get this
+    * object from the kernel before by looking through the list
+    * again for a matching gem_handle
+    */
+   bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
+   if (bo) {
+      iris_bo_reference(bo);
+      goto out;
+   }
+
+   bo = calloc(1, sizeof(*bo));
+   if (!bo)
+      goto out;
+
+   p_atomic_set(&bo->refcount, 1);
+
+   bo->size = open_arg.size;
+   bo->gtt_offset = 0;
+   bo->bufmgr = bufmgr;
+   bo->gem_handle = open_arg.handle;
+   bo->name = name;
+   bo->global_name = handle;
+   bo->reusable = false;
+   bo->external = true;
+
+   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
+   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
+
+   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
+   ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
+   if (ret != 0)
+      goto err_unref;
+
+   bo->tiling_mode = get_tiling.tiling_mode;
+   bo->swizzle_mode = get_tiling.swizzle_mode;
+   /* XXX stride is unknown */
+   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);
+
+out:
+   mtx_unlock(&bufmgr->lock);
+   return bo;
+
+err_unref:
+   bo_free(bo);
+   mtx_unlock(&bufmgr->lock);
+   return NULL;
+}
+
+static void
+bo_free(struct iris_bo *bo)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+
+   if (bo->map_cpu) {
+      VG_NOACCESS(bo->map_cpu, bo->size);
+      munmap(bo->map_cpu, bo->size);
+   }
+   if (bo->map_wc) {
+      VG_NOACCESS(bo->map_wc, bo->size);
+      munmap(bo->map_wc, bo->size);
+   }
+   if (bo->map_gtt) {
+      VG_NOACCESS(bo->map_gtt, bo->size);
+      munmap(bo->map_gtt, bo->size);
+   }
+
+   if (bo->external) {
+      struct hash_entry *entry;
+
+      if (bo->global_name) {
+         entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
+         _mesa_hash_table_remove(bufmgr->name_table, entry);
+      }
+
+      entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
+      _mesa_hash_table_remove(bufmgr->handle_table, entry);
+   }
+
+   /* Close this object */
+   struct drm_gem_close close = { .handle = bo->gem_handle };
+   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
+   if (ret != 0) {
+      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
+          bo->gem_handle, bo->name, strerror(errno));
+   }
+   free(bo);
+}
+
+/** Frees all cached buffers significantly older than @time. */
+static void
+cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
+{
+   int i;
+
+   if (bufmgr->time == time)
+      return;
+
+   for (i = 0; i < bufmgr->num_buckets; i++) {
+      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
+
+      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
+         if (time - bo->free_time <= 1)
+            break;
+
+         list_del(&bo->head);
+
+         bo_free(bo);
+      }
+   }
+
+   bufmgr->time = time;
+}
+
+static void
+bo_unreference_final(struct iris_bo *bo, time_t time)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+   struct bo_cache_bucket *bucket;
+
+   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
+
+   bucket = bucket_for_size(bufmgr, bo->size);
+   /* Put the buffer into our internal cache for reuse if we can. */
+   if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
+       iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
+      bo->free_time = time;
+
+      bo->name = NULL;
+      bo->kflags = 0;
+
+      list_addtail(&bo->head, &bucket->head);
+   } else {
+      bo_free(bo);
+   }
+}
+
+void
+iris_bo_unreference(struct iris_bo *bo)
+{
+   if (bo == NULL)
+      return;
+
+   assert(p_atomic_read(&bo->refcount) > 0);
+
+   if (atomic_add_unless(&bo->refcount, -1, 1)) {
+      struct iris_bufmgr *bufmgr = bo->bufmgr;
+      struct timespec time;
+
+      clock_gettime(CLOCK_MONOTONIC, &time);
+
+      mtx_lock(&bufmgr->lock);
+
+      if (p_atomic_dec_zero(&bo->refcount)) {
+         bo_unreference_final(bo, time.tv_sec);
+         cleanup_bo_cache(bufmgr, time.tv_sec);
+      }
+
+      mtx_unlock(&bufmgr->lock);
+   }
+}
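
Note the two-phase drop here: atomic_add_unless() decrements the refcount
locklessly unless it is currently 1, so only the thread releasing what looks
like the last reference takes the mutex, and p_atomic_dec_zero() then
re-checks under bufmgr->lock, guarding against a concurrent lookup (e.g. via
the name/handle tables) having taken a new reference in the meantime, before
the buffer is cached or freed.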
+
+static void
+bo_wait_with_stall_warning(struct pipe_debug_callback *dbg,
+                           struct iris_bo *bo,
+                           const char *action)
+{
+   bool busy = dbg && !bo->idle;
+   double elapsed = unlikely(busy) ? -get_time() : 0.0;
+
+   iris_bo_wait_rendering(bo);
+
+   if (unlikely(busy)) {
+      elapsed += get_time();
+      if (elapsed > 1e-5) /* 0.01ms */ {
+         perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
+                    action, bo->name, elapsed * 1000);
+      }
+   }
+}
+
+static void
+print_flags(unsigned flags)
+{
+   if (flags & MAP_READ)
+      DBG("READ ");
+   if (flags & MAP_WRITE)
+      DBG("WRITE ");
+   if (flags & MAP_ASYNC)
+      DBG("ASYNC ");
+   if (flags & MAP_PERSISTENT)
+      DBG("PERSISTENT ");
+   if (flags & MAP_COHERENT)
+      DBG("COHERENT ");
+   if (flags & MAP_RAW)
+      DBG("RAW ");
+   DBG("\n");
+}
+
+static void *
+iris_bo_map_cpu(struct pipe_debug_callback *dbg,
+                struct iris_bo *bo, unsigned flags)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+
+   /* We disallow CPU maps for writing to non-coherent buffers, as the
+    * CPU map can become invalidated when a batch is flushed out, which
+    * can happen at unpredictable times.  You should use WC maps instead.
+    */
+   assert(bo->cache_coherent || !(flags & MAP_WRITE));
+
+   if (!bo->map_cpu) {
+      DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);
+
+      struct drm_i915_gem_mmap mmap_arg = {
+         .handle = bo->gem_handle,
+         .size = bo->size,
+      };
+      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+      if (ret != 0) {
+         ret = -errno;
+         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
+         return NULL;
+      }
+      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
+      VG_DEFINED(map, bo->size);
+
+      if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
+         VG_NOACCESS(map, bo->size);
+         munmap(map, bo->size);
+      }
+   }
+   assert(bo->map_cpu);
+
+   DBG("iris_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
+       bo->map_cpu);
+   print_flags(flags);
+
+   if (!(flags & MAP_ASYNC)) {
+      bo_wait_with_stall_warning(dbg, bo, "CPU mapping");
+   }
+
+   if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
+      /* If we're reusing an existing CPU mapping, the CPU caches may
+       * contain stale data from the last time we read from that mapping.
+       * (With the BO cache, it might even be data from a previous buffer!)
+       * Even if it's a brand new mapping, the kernel may have zeroed the
+       * buffer via CPU writes.
+       *
+       * We need to invalidate those cachelines so that we see the latest
+       * contents, and so long as we only read from the CPU mmap we do not
+       * need to write those cachelines back afterwards.
+       *
+       * On LLC, the empirical evidence suggests that writes from the GPU
+       * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
+       * cachelines. (Other reads, such as the display engine, bypass the
+       * LLC entirely requiring us to keep dirty pixels for the scanout
+       * out of any cache.)
+       */
+      gen_invalidate_range(bo->map_cpu, bo->size);
+   }
+
+   return bo->map_cpu;
+}
+
+static void *
+iris_bo_map_wc(struct pipe_debug_callback *dbg,
+               struct iris_bo *bo, unsigned flags)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+
+   if (!bo->map_wc) {
+      DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);
+
+      struct drm_i915_gem_mmap mmap_arg = {
+         .handle = bo->gem_handle,
+         .size = bo->size,
+         .flags = I915_MMAP_WC,
+      };
+      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+      if (ret != 0) {
+         ret = -errno;
+         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
+         return NULL;
+      }
+
+      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
+      VG_DEFINED(map, bo->size);
+
+      if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
+         VG_NOACCESS(map, bo->size);
+         munmap(map, bo->size);
+      }
+   }
+   assert(bo->map_wc);
+
+   DBG("iris_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
+   print_flags(flags);
+
+   if (!(flags & MAP_ASYNC)) {
+      bo_wait_with_stall_warning(dbg, bo, "WC mapping");
+   }
+
+   return bo->map_wc;
+}
+
+/**
+ * Perform an uncached mapping via the GTT.
+ *
+ * Write access through the GTT is not quite fully coherent. On low power
+ * systems especially, like modern Atoms, we can observe reads from RAM before
+ * the write via GTT has landed. A write memory barrier that flushes the Write
+ * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
+ * read after the write as the GTT write suffers a small delay through the GTT
+ * indirection. The kernel uses an uncached mmio read to ensure the GTT write
+ * is ordered with reads (either by the GPU, WB or WC) and unconditionally
+ * flushes prior to execbuf submission. However, if we are not informing the
+ * kernel about our GTT writes, it will not flush before earlier access, such
+ * as when using the cmdparser. Similarly, we need to be careful if we should
+ * ever issue a CPU read immediately following a GTT write.
+ *
+ * Telling the kernel about write access also has one more important
+ * side-effect. Upon receiving notification about the write, it cancels any
+ * scanout buffering for FBC/PSR and friends. Later FBC/PSR is then flushed by
+ * either SW_FINISH or DIRTYFB. The presumption is that we never write to the
+ * actual scanout via a mmaping, only to a backbuffer and so all the FBC/PSR
+ * tracking is handled on the buffer exchange instead.
+ */
+static void *
+iris_bo_map_gtt(struct pipe_debug_callback *dbg,
+                struct iris_bo *bo, unsigned flags)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+
+   /* Get a mapping of the buffer if we haven't before. */
+   if (bo->map_gtt == NULL) {
+      DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);
+
+      struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };
+
+      /* Get the fake offset back... */
+      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
+      if (ret != 0) {
+         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
+             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
+         return NULL;
+      }
+
+      /* and mmap it. */
+      void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
+                       MAP_SHARED, bufmgr->fd, mmap_arg.offset);
+      if (map == MAP_FAILED) {
+         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
+         return NULL;
+      }
+
+      /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
+       * already intercept this mmap call. However, for consistency between
+       * all the mmap paths, we mark the pointer as defined now and mark it
+       * as inaccessible afterwards.
+       */
+      VG_DEFINED(map, bo->size);
+
+      if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
+         VG_NOACCESS(map, bo->size);
+         munmap(map, bo->size);
+      }
+   }
+   assert(bo->map_gtt);
+
+   DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
+   print_flags(flags);
+
+   if (!(flags & MAP_ASYNC)) {
+      bo_wait_with_stall_warning(dbg, bo, "GTT mapping");
+   }
+
+   return bo->map_gtt;
+}
+
+static bool
+can_map_cpu(struct iris_bo *bo, unsigned flags)
+{
+   if (bo->cache_coherent)
+      return true;
+
+   /* Even if the buffer itself is not cache-coherent (such as a scanout), on
+    * an LLC platform reads are always coherent (as they are performed via the
+    * central system agent). It is just the writes that we need to take special
+    * care to ensure that land in main memory and not stick in the CPU cache.
+    */
+   if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
+      return true;
+
+   /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
+    * across batch flushes where the kernel will change cache domains of the
+    * bo, invalidating continued access to the CPU mmap on non-LLC device.
+    *
+    * Similarly, ASYNC typically means that the buffer will be accessed via
+    * both the CPU and the GPU simultaneously.  Batches may be executed that
+    * use the BO even while it is mapped.  While OpenGL technically disallows
+    * most drawing while non-persistent mappings are active, we may still use
+    * the GPU for blits or other operations, causing batches to happen at
+    * inconvenient times.
+    */
+   if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC))
+      return false;
+
+   return !(flags & MAP_WRITE);
+}
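+
+/* Summarizing the decision above:
+ *
+ *    cache-coherent BO                     -> CPU map
+ *    read-only mapping on an LLC platform  -> CPU map
+ *    PERSISTENT / COHERENT / ASYNC flags   -> WC map
+ *    remaining reads                       -> CPU map
+ *    remaining writes                      -> WC map
+ */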
+
+void *
+iris_bo_map(struct pipe_debug_callback *dbg,
+            struct iris_bo *bo, unsigned flags)
+{
+   if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
+      return iris_bo_map_gtt(dbg, bo, flags);
+
+   void *map;
+
+   if (can_map_cpu(bo, flags))
+      map = iris_bo_map_cpu(dbg, bo, flags);
+   else
+      map = iris_bo_map_wc(dbg, bo, flags);
+
+   /* Allow the attempt to fail by falling back to the GTT where necessary.
+    *
+    * Not every buffer can be mmaped directly using the CPU (or WC), for
+    * example buffers that wrap stolen memory or are imported from other
+    * devices. For those, we have little choice but to use a GTT mmapping.
+    * However, if we use a slow GTT mmapping for reads where we expected fast
+    * access, that order of magnitude difference in throughput will be clearly
+    * expressed by angry users.
+    *
+    * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
+    */
+   if (!map && !(flags & MAP_RAW)) {
+      perf_debug(dbg, "Fallback GTT mapping for %s with access flags %x\n",
+                 bo->name, flags);
+      map = iris_bo_map_gtt(dbg, bo, flags);
+   }
+
+   return map;
+}
+
+int
+iris_bo_subdata(struct iris_bo *bo, uint64_t offset,
+               uint64_t size, const void *data)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+
+   struct drm_i915_gem_pwrite pwrite = {
+      .handle = bo->gem_handle,
+      .offset = offset,
+      .size = size,
+      .data_ptr = (uint64_t) (uintptr_t) data,
+   };
+
+   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
+   if (ret != 0) {
+      ret = -errno;
+      DBG("%s:%d: Error writing data to buffer %d: "
+          "(%"PRIu64" %"PRIu64") %s .\n",
+          __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
+   }
+
+   return ret;
+}
+
+/** Waits for all GPU rendering with the object to have completed. */
+void
+iris_bo_wait_rendering(struct iris_bo *bo)
+{
+   /* We require a kernel recent enough for WAIT_IOCTL support.
+    * See iris_bufmgr_init().
+    */
+   iris_bo_wait(bo, -1);
+}
+
+/**
+ * Waits on a BO for the given amount of time.
+ *
+ * @bo: buffer object to wait for
+ * @timeout_ns: amount of time to wait in nanoseconds.
+ *   If value is less than 0, an infinite wait will occur.
+ *
+ * Returns 0 if the wait was successful, i.e. the last batch referencing the
+ * object has completed within the allotted time. Otherwise, a negative errno
+ * value describes the error. Of particular interest is -ETIME, which means
+ * the wait timed out without yielding the desired result.
+ *
+ * Similar to iris_bo_wait_rendering except a timeout parameter allows
+ * the operation to give up after a certain amount of time. Another subtle
+ * difference is that the internal locking semantics differ: this variant
+ * does not hold the lock for the duration of the wait, which makes the wait
+ * subject to a larger userspace race window.
+ *
+ * The implementation shall wait until the object is no longer actively
+ * referenced within a batch buffer at the time of the call. The wait does
+ * not guarantee that the buffer will not be re-issued via another thread or
+ * a flinked handle. Userspace must make sure this race does not occur if
+ * such precision is important.
+ *
+ * Note that some kernels have broken the promise of an infinite wait for
+ * negative values; upgrade to the latest stable kernel if this is the case.
+ */
+int
+iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+
+   /* If we know it's idle, don't bother with the kernel round trip */
+   if (bo->idle && !bo->external)
+      return 0;
+
+   struct drm_i915_gem_wait wait = {
+      .bo_handle = bo->gem_handle,
+      .timeout_ns = timeout_ns,
+   };
+   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
+   if (ret == -1)
+      return -errno;
+
+   bo->idle = true;
+
+   return ret;
+}
+
+void
+iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
+{
+   mtx_destroy(&bufmgr->lock);
+
+   /* Free any cached buffer objects we were going to reuse */
+   for (int i = 0; i < bufmgr->num_buckets; i++) {
+      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
+
+      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
+         list_del(&bo->head);
+
+         bo_free(bo);
+      }
+   }
+
+   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
+   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);
+
+   free(bufmgr);
+}
+
+static int
+bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
+                       uint32_t stride)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+   struct drm_i915_gem_set_tiling set_tiling;
+   int ret;
+
+   if (bo->global_name == 0 &&
+       tiling_mode == bo->tiling_mode && stride == bo->stride)
+      return 0;
+
+   memset(&set_tiling, 0, sizeof(set_tiling));
+   do {
+      /* set_tiling is slightly broken and overwrites the
+       * input on the error path, so we have to open code
+       * drm_ioctl.
+       */
+      set_tiling.handle = bo->gem_handle;
+      set_tiling.tiling_mode = tiling_mode;
+      set_tiling.stride = stride;
+
+      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
+   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+   if (ret == -1)
+      return -errno;
+
+   bo->tiling_mode = set_tiling.tiling_mode;
+   bo->swizzle_mode = set_tiling.swizzle_mode;
+   bo->stride = set_tiling.stride;
+   return 0;
+}
+
+int
+iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode,
+                  uint32_t *swizzle_mode)
+{
+   *tiling_mode = bo->tiling_mode;
+   *swizzle_mode = bo->swizzle_mode;
+   return 0;
+}
+
+struct iris_bo *
+iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
+{
+   uint32_t handle;
+   struct iris_bo *bo;
+
+   mtx_lock(&bufmgr->lock);
+   int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
+   if (ret) {
+      DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
+          strerror(errno));
+      mtx_unlock(&bufmgr->lock);
+      return NULL;
+   }
+
+   /*
+    * See if the kernel has already returned this buffer to us. Just as
+    * for named buffers, we must not create two BOs pointing at the same
+    * kernel object.
+    */
+   bo = hash_find_bo(bufmgr->handle_table, handle);
+   if (bo) {
+      iris_bo_reference(bo);
+      goto out;
+   }
+
+   bo = calloc(1, sizeof(*bo));
+   if (!bo)
+      goto out;
+
+   p_atomic_set(&bo->refcount, 1);
+
+   /* Determine the size of the BO. The fd-to-handle ioctl really should
+    * return the size, but it doesn't. If we have kernel 3.12 or later, we
+    * can lseek on the prime fd to get the size. Older kernels will just
+    * fail, in which case we fall back to the provided (estimated or
+    * guessed) size. */
+   ret = lseek(prime_fd, 0, SEEK_END);
+   if (ret != -1)
+      bo->size = ret;
+
+   bo->bufmgr = bufmgr;
+
+   bo->gem_handle = handle;
+   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
+
+   bo->name = "prime";
+   bo->reusable = false;
+   bo->external = true;
+
+   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
+   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
+      goto err;
+
+   bo->tiling_mode = get_tiling.tiling_mode;
+   bo->swizzle_mode = get_tiling.swizzle_mode;
+   /* XXX stride is unknown */
+
+out:
+   mtx_unlock(&bufmgr->lock);
+   return bo;
+
+err:
+   bo_free(bo);
+   mtx_unlock(&bufmgr->lock);
+   return NULL;
+}
+
+static void
+iris_bo_make_external(struct iris_bo *bo)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+
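+   /* Double-checked locking: the unlocked test is a cheap fast path, and
+    * racing threads re-check under the lock so the handle is inserted into
+    * the table exactly once.
+    */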
+   if (!bo->external) {
+      mtx_lock(&bufmgr->lock);
+      if (!bo->external) {
+         _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
+         bo->external = true;
+      }
+      mtx_unlock(&bufmgr->lock);
+   }
+}
+
+int
+iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+
+   iris_bo_make_external(bo);
+
+   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
+                          DRM_CLOEXEC, prime_fd) != 0)
+      return -errno;
+
+   bo->reusable = false;
+
+   return 0;
+}
+
+uint32_t
+iris_bo_export_gem_handle(struct iris_bo *bo)
+{
+   iris_bo_make_external(bo);
+
+   return bo->gem_handle;
+}
+
+int
+iris_bo_flink(struct iris_bo *bo, uint32_t *name)
+{
+   struct iris_bufmgr *bufmgr = bo->bufmgr;
+
+   if (!bo->global_name) {
+      struct drm_gem_flink flink = { .handle = bo->gem_handle };
+
+      if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
+         return -errno;
+
+      iris_bo_make_external(bo);
+      mtx_lock(&bufmgr->lock);
+      if (!bo->global_name) {
+         bo->global_name = flink.name;
+         _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
+      }
+      mtx_unlock(&bufmgr->lock);
+
+      bo->reusable = false;
+   }
+
+   *name = bo->global_name;
+   return 0;
+}
+
+/**
+ * Enables unlimited caching of buffer objects for reuse.
+ *
+ * This is potentially very memory expensive, as the cache at each bucket
+ * size is only bounded by how many buffers of that size we've managed to have
+ * in flight at once.
+ */
+void
+iris_bufmgr_enable_reuse(struct iris_bufmgr *bufmgr)
+{
+   bufmgr->bo_reuse = true;
+}
+
+static void
+add_bucket(struct iris_bufmgr *bufmgr, int size)
+{
+   unsigned int i = bufmgr->num_buckets;
+
+   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));
+
+   list_inithead(&bufmgr->cache_bucket[i].head);
+   bufmgr->cache_bucket[i].size = size;
+   bufmgr->num_buckets++;
+
+   assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
+   assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
+   assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
+}
+
+static void
+init_cache_buckets(struct iris_bufmgr *bufmgr)
+{
+   uint64_t size, cache_max_size = 64 * 1024 * 1024;
+
+   /* OK, so power of two buckets was too wasteful of memory.
+    * Give 3 other sizes between each power of two, to hopefully
+    * cover things accurately enough.  (The alternative is
+    * probably to just go for exact matching of sizes, and assume
+    * that for things like composited window resize the tiled
+    * width/height alignment and rounding of sizes to pages will
+    * get us useful cache hit rates anyway)
+    */
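+   /* Worked example of the progression this produces: 4, 8, 12 KiB from
+    * the explicit buckets below, then 16, 20, 24, 28, 32, 40, 48, 56, 64,
+    * 80, ... KiB from the loop -- each power of two plus three evenly
+    * spaced intermediate sizes, up to (and slightly beyond) 64 MiB.
+    */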
+   add_bucket(bufmgr, 4096);
+   add_bucket(bufmgr, 4096 * 2);
+   add_bucket(bufmgr, 4096 * 3);
+
+   /* Initialize the linked lists for BO reuse cache. */
+   for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
+      add_bucket(bufmgr, size);
+
+      add_bucket(bufmgr, size + size * 1 / 4);
+      add_bucket(bufmgr, size + size * 2 / 4);
+      add_bucket(bufmgr, size + size * 3 / 4);
+   }
+}
+
+uint32_t
+iris_create_hw_context(struct iris_bufmgr *bufmgr)
+{
+   struct drm_i915_gem_context_create create = { };
+   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
+   if (ret != 0) {
+      DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
+      return 0;
+   }
+
+   return create.ctx_id;
+}
+
+int
+iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
+                            uint32_t ctx_id,
+                            int priority)
+{
+   struct drm_i915_gem_context_param p = {
+      .ctx_id = ctx_id,
+      .param = I915_CONTEXT_PARAM_PRIORITY,
+      .value = priority,
+   };
+   int err;
+
+   err = 0;
+   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
+      err = -errno;
+
+   return err;
+}
+
+void
+iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id)
+{
+   struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };
+
+   if (ctx_id != 0 &&
+       drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
+      fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
+              strerror(errno));
+   }
+}
+
+int
+iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
+{
+   struct drm_i915_reg_read reg_read = { .offset = offset };
+   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
+
+   *result = reg_read.val;
+   return ret;
+}
+
+/**
+ * Initializes the GEM buffer manager, which uses the kernel to allocate,
+ * map, and manage buffer objects.
+ *
+ * \param fd File descriptor of the opened DRM device.
+ */
+struct iris_bufmgr *
+iris_bufmgr_init(struct gen_device_info *devinfo, int fd)
+{
+   struct iris_bufmgr *bufmgr = calloc(1, sizeof(*bufmgr));
+   if (bufmgr == NULL)
+      return NULL;
+
+   /* Handles to buffer objects belong to the device fd and are not
+    * reference counted by the kernel.  If the same fd is used by
+    * multiple parties (threads sharing the same screen bufmgr, or
+    * even worse the same device fd passed to multiple libraries)
+    * ownership of those handles is shared by those independent parties.
+    *
+    * Don't do this! Ensure that each library/bufmgr has its own device
+    * fd so that its namespace does not clash with another.
+    */
+   bufmgr->fd = fd;
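+   /* Note that dup(2) / fcntl(F_DUPFD) would not help a caller here:
+    * duplicated fds share the same open file description, and GEM handle
+    * namespaces are per file description. A private namespace requires
+    * opening the device node again.
+    */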
+
+   if (mtx_init(&bufmgr->lock, mtx_plain) != 0) {
+      free(bufmgr);
+      return NULL;
+   }
+
+   bufmgr->has_llc = devinfo->has_llc;
+
+   init_cache_buckets(bufmgr);
+
+   bufmgr->name_table =
+      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
+   bufmgr->handle_table =
+      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
+
+   return bufmgr;
+}
diff --git a/src/gallium/drivers/iris/iris_bufmgr.h b/src/gallium/drivers/iris/iris_bufmgr.h
new file mode 100644 (file)
index 0000000..13b8776
--- /dev/null
@@ -0,0 +1,308 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef IRIS_BUFMGR_H
+#define IRIS_BUFMGR_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include "util/macros.h"
+#include "util/u_atomic.h"
+#include "util/list.h"
+
+struct gen_device_info;
+struct pipe_debug_callback;
+
+struct iris_bo {
+   /**
+    * Size in bytes of the buffer object.
+    *
+    * The size may be larger than the size originally requested for the
+    * allocation, such as being aligned to page size.
+    */
+   uint64_t size;
+
+   /**
+    * Alignment requirement for object
+    *
+    * Used for GTT mapping & pinning the object.
+    */
+   uint64_t align;
+
+   /** Buffer manager context associated with this buffer object */
+   struct iris_bufmgr *bufmgr;
+
+   /** The GEM handle for this buffer object. */
+   uint32_t gem_handle;
+
+   /**
+    * Offset of the buffer inside the Graphics Translation Table.
+    *
+    * This is effectively our GPU address for the buffer and we use it
+    * as our base for all state pointers into the buffer. However, since the
+    * kernel may be forced to move it around during the course of the
+    * buffer's lifetime, we can only know where the buffer was on the last
+    * execbuf. We presume, and are usually right, that the buffer will not
+    * move and so we use that last offset for the next batch and by doing
+    * so we can avoid having the kernel perform a relocation fixup pass as
+    * our pointers inside the batch will be using the correct base offset.
+    *
+    * Since we do use it as a base address for the next batch of pointers,
+    * the kernel treats our offset as a request, and if possible will
+    * arrange for the buffer to be placed at that address (trying to balance
+    * the cost of buffer migration versus the cost of performing
+    * relocations). Furthermore, by specifying EXEC_OBJECT_PINNED we can
+    * force the kernel to place the buffer at our chosen offset, or to
+    * report a failure if that would conflict with another object.
+    *
+    * Note the GTT may be either per context, or shared globally across the
+    * system. On a shared system, our buffers have to contend for address
+    * space with both aperture mappings and framebuffers and so are more
+    * likely to be moved. On a full ppGTT system, each batch exists in its
+    * own GTT, and so each buffer may have its own offset within each
+    * context.
+    */
+   uint64_t gtt_offset;
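+
+   /* A sketch of how a pinned offset reaches the kernel (illustrative;
+    * the actual submission code lives in the batchbuffer logic):
+    *
+    *    struct drm_i915_gem_exec_object2 obj = {
+    *       .handle = bo->gem_handle,
+    *       .offset = bo->gtt_offset,
+    *       .flags  = EXEC_OBJECT_PINNED,
+    *    };
+    */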
+
+   /**
+    * The validation list index for this buffer, or -1 when not in a batch.
+    * Note that a single buffer may be in multiple batches (contexts), and
+    * this is a global field, which refers to the last batch using the BO.
+    * It should not be considered authoritative, but can be used to avoid a
+    * linear walk of the validation list in the common case by guessing that
+    * exec_bos[bo->index] == bo and confirming whether that's the case.
+    */
+   unsigned index;
+
+   /**
+    * Boolean of whether the GPU is definitely not accessing the buffer.
+    *
+    * This is only valid when reusable, since non-reusable
+    * buffers are those that have been shared with other
+    * processes, so we don't know their state.
+    */
+   bool idle;
+
+   int refcount;
+   const char *name;
+
+   uint64_t kflags;
+
+   /**
+    * Kernel-assigned global name for this object.
+    *
+    * The list contains both flink-named and prime fd'd objects.
+    */
+   unsigned global_name;
+
+   /**
+    * Current tiling mode
+    */
+   uint32_t tiling_mode;
+   uint32_t swizzle_mode;
+   uint32_t stride;
+
+   time_t free_time;
+
+   /** Mapped address for the buffer, saved across map/unmap cycles */
+   void *map_cpu;
+   /** GTT virtual address for the buffer, saved across map/unmap cycles */
+   void *map_gtt;
+   /** WC CPU address for the buffer, saved across map/unmap cycles */
+   void *map_wc;
+
+   /** BO cache list */
+   struct list_head head;
+
+   /**
+    * Boolean of whether this buffer can be re-used
+    */
+   bool reusable;
+
+   /**
+    * Boolean of whether this buffer has been shared with an external client.
+    */
+   bool external;
+
+   /**
+    * Boolean of whether this buffer is cache coherent
+    */
+   bool cache_coherent;
+};
+
+#define BO_ALLOC_BUSY       (1<<0)
+#define BO_ALLOC_ZEROED     (1<<1)
+
+/**
+ * Allocate a buffer object.
+ *
+ * Buffer objects are not necessarily initially mapped into CPU virtual
+ * address space or graphics device aperture.  They must be mapped
+ * using iris_bo_map() to be used by the CPU.
+ */
+struct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr, const char *name,
+                              uint64_t size, uint64_t alignment);
+
+/**
+ * Allocate a tiled buffer object.
+ *
+ * Alignment for tiled objects is set automatically; the 'flags'
+ * argument provides a hint about how the object will be used initially.
+ *
+ * Valid tiling formats are:
+ *  I915_TILING_NONE
+ *  I915_TILING_X
+ *  I915_TILING_Y
+ */
+struct iris_bo *iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr,
+                                    const char *name,
+                                    uint64_t size,
+                                    uint32_t tiling_mode,
+                                    uint32_t pitch,
+                                    unsigned flags);
+
+/** Takes a reference on a buffer object */
+static inline void
+iris_bo_reference(struct iris_bo *bo)
+{
+   p_atomic_inc(&bo->refcount);
+}
+
+/**
+ * Releases a reference on a buffer object, freeing the data if
+ * no references remain.
+ */
+void iris_bo_unreference(struct iris_bo *bo);
+
+#define MAP_READ          0x01
+#define MAP_WRITE         0x02
+#define MAP_ASYNC         0x20
+#define MAP_PERSISTENT    0x40
+#define MAP_COHERENT      0x80
+/* internal */
+#define MAP_INTERNAL_MASK (0xff << 24)
+#define MAP_RAW           (0x01 << 24)
+
+/**
+ * Maps the buffer into userspace.
+ *
+ * Unless MAP_ASYNC is set in 'flags', this function first blocks, waiting
+ * for any existing execution on the buffer to complete. The resulting
+ * mapping is returned.
+ */
+MUST_CHECK void *iris_bo_map(struct pipe_debug_callback *dbg,
+                             struct iris_bo *bo, unsigned flags);
+
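+/* Example usage (a minimal sketch; blocks on busy BOs unless MAP_ASYNC is
+ * passed, and assumes 'data' and 'size' are the caller's):
+ *
+ *    void *ptr = iris_bo_map(dbg, bo, MAP_WRITE);
+ *    if (ptr)
+ *       memcpy(ptr, data, size);
+ */
+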
+/**
+ * Unmaps the buffer. Mappings are cached for the lifetime of the BO and
+ * torn down when it is freed, so this is currently a no-op.
+ */
+static inline int iris_bo_unmap(struct iris_bo *bo) { return 0; }
+
+/** Write data into an object. */
+int iris_bo_subdata(struct iris_bo *bo, uint64_t offset,
+                   uint64_t size, const void *data);
+/**
+ * Waits for rendering to an object by the GPU to have completed.
+ *
+ * This is not required for any access to the BO by bo_map,
+ * bo_subdata, etc.  It is merely a way for the driver to implement
+ * glFinish.
+ */
+void iris_bo_wait_rendering(struct iris_bo *bo);
+
+/**
+ * Tears down the buffer manager instance.
+ */
+void iris_bufmgr_destroy(struct iris_bufmgr *bufmgr);
+
+/**
+ * Get the current tiling (and resulting swizzling) mode for the bo.
+ *
+ * \param bo Buffer to get the tiling mode for
+ * \param tiling_mode returned tiling mode
+ * \param swizzle_mode returned swizzling mode
+ */
+int iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode,
+                      uint32_t *swizzle_mode);
+
+/**
+ * Create a visible name for a buffer which can be used by other apps
+ *
+ * \param bo Buffer to create a name for
+ * \param name Returned name
+ */
+int iris_bo_flink(struct iris_bo *bo, uint32_t *name);
+
+/**
+ * Returns 1 if mapping the buffer for write could cause the process
+ * to block, due to the object being active in the GPU.
+ */
+int iris_bo_busy(struct iris_bo *bo);
+
+/**
+ * Specify the volatility of the buffer.
+ * \param bo Buffer whose purgeable status to change
+ * \param madv The purgeable status
+ *
+ * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
+ * reclaimed under memory pressure. If you subsequently require the buffer,
+ * then you must pass I915_MADV_WILLNEED to mark the buffer as required.
+ *
+ * Returns 1 if the buffer was retained, or 0 if it was discarded whilst
+ * marked as I915_MADV_DONTNEED.
+ */
+int iris_bo_madvise(struct iris_bo *bo, int madv);
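+
+/* Sketch of the intended protocol (illustrative only; the cache decides
+ * when to mark buffers purgeable, and bo_free() is internal to the
+ * implementation):
+ *
+ *    iris_bo_madvise(bo, I915_MADV_DONTNEED);       // cache an idle BO
+ *    ...
+ *    if (!iris_bo_madvise(bo, I915_MADV_WILLNEED))  // later, on reuse
+ *       bo_free(bo);   // pages were purged; contents are gone
+ */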
+
+/* iris_bufmgr.c */
+struct iris_bufmgr *iris_bufmgr_init(struct gen_device_info *devinfo, int fd);
+struct iris_bo *iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
+                                             const char *name,
+                                             unsigned handle);
+void iris_bufmgr_enable_reuse(struct iris_bufmgr *bufmgr);
+
+int iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns);
+
+uint32_t iris_create_hw_context(struct iris_bufmgr *bufmgr);
+
+#define IRIS_CONTEXT_LOW_PRIORITY    ((I915_CONTEXT_MIN_USER_PRIORITY-1)/2)
+#define IRIS_CONTEXT_MEDIUM_PRIORITY (I915_CONTEXT_DEFAULT_PRIORITY)
+#define IRIS_CONTEXT_HIGH_PRIORITY   ((I915_CONTEXT_MAX_USER_PRIORITY+1)/2)
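+/* With the kernel's user priority range (i915_drm.h defines
+ * I915_CONTEXT_MIN_USER_PRIORITY as -1023, I915_CONTEXT_DEFAULT_PRIORITY
+ * as 0, and I915_CONTEXT_MAX_USER_PRIORITY as 1023), these evaluate to
+ * -512, 0, and 512 respectively. */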
+
+int iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
+                                 uint32_t ctx_id, int priority);
+
+void iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id);
+
+int iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd);
+struct iris_bo *iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd);
+
+uint32_t iris_bo_export_gem_handle(struct iris_bo *bo);
+
+int iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *out);
+
+int drm_ioctl(int fd, unsigned long request, void *arg);
+
+#endif /* IRIS_BUFMGR_H */
diff --git a/src/gallium/drivers/iris/iris_context.h b/src/gallium/drivers/iris/iris_context.h
new file mode 100644 (file)
index 0000000..49d199f
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef IRIS_CONTEXT_H
+#define IRIS_CONTEXT_H
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+#include "util/u_debug.h"
+#include "intel/common/gen_debug.h"
+#include "iris_screen.h"
+
+struct iris_bo;
+
+#define IRIS_MAX_TEXTURE_SAMPLERS 32
+#define IRIS_MAX_VIEWPORTS 16
+
+enum iris_dirty {
+   IRIS_DIRTY_COLOR_CALC_STATE,
+   IRIS_DIRTY_POLYGON_STIPPLE,
+   IRIS_DIRTY_SCISSOR_RECT,
+   IRIS_DIRTY_WM_DEPTH_STENCIL,
+};
+
+#define IRIS_NEW_COLOR_CALC_STATE (1ull << IRIS_DIRTY_COLOR_CALC_STATE)
+#define IRIS_NEW_POLYGON_STIPPLE  (1ull << IRIS_DIRTY_POLYGON_STIPPLE)
+#define IRIS_NEW_SCISSOR_RECT     (1ull << IRIS_DIRTY_SCISSOR_RECT)
+#define IRIS_NEW_WM_DEPTH_STENCIL (1ull << IRIS_DIRTY_WM_DEPTH_STENCIL)
+
+struct iris_context {
+   struct pipe_context ctx;
+
+   struct pipe_debug_callback dbg;
+
+   struct {
+      uint64_t dirty;
+      struct pipe_blend_color blend_color;
+      struct pipe_poly_stipple poly_stipple;
+      struct pipe_scissor_state scissors[IRIS_MAX_VIEWPORTS];
+      struct pipe_stencil_ref stencil_ref;
+   } state;
+};
+
+#define perf_debug(dbg, ...) do {                      \
+   if (INTEL_DEBUG & DEBUG_PERF)                       \
+      dbg_printf(__VA_ARGS__);                         \
+   if (unlikely(dbg))                                  \
+      pipe_debug_message(dbg, PERF_INFO, __VA_ARGS__); \
+} while (0)
+
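+/* Example usage, assuming an iris_context pointer named 'ice':
+ *
+ *    perf_debug(&ice->dbg, "Stalling on %s\n", bo->name);
+ */
+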
+double get_time(void);
+
+struct pipe_context *
+iris_create_context(struct pipe_screen *screen, void *priv, unsigned flags);
+
+void iris_init_program_functions(struct pipe_context *ctx);
+void iris_init_state_functions(struct pipe_context *ctx);
+
+#endif
diff --git a/src/gallium/drivers/iris/iris_draw.c b/src/gallium/drivers/iris/iris_draw.c
new file mode 100644 (file)
index 0000000..b2472dd
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "util/u_inlines.h"
+#include "util/u_transfer.h"
+#include "intel/compiler/brw_compiler.h"
+#include "iris_context.h"
+
+#define __gen_address_type unsigned
+#define __gen_user_data void
+
+static uint64_t
+__gen_combine_address(void *user_data, void *location,
+                      unsigned address, uint32_t delta)
+{
+   return delta;
+}
+
+#define __genxml_cmd_length(cmd) cmd ## _length
+#define __genxml_cmd_length_bias(cmd) cmd ## _length_bias
+#define __genxml_cmd_header(cmd) cmd ## _header
+#define __genxml_cmd_pack(cmd) cmd ## _pack
+
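+/* These packing helpers use a single-iteration for loop: the template
+ * struct is declared (and, for commands, header-initialized) in the init
+ * clause, filled in by the loop body, and packed into 'dst' by the
+ * increment expression once the body finishes -- the same genxml
+ * convention used by other Mesa drivers (e.g. anv).
+ */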
+#define iris_pack_command(cmd, dst, name)                         \
+   for (struct cmd name = { __genxml_cmd_header(cmd) },           \
+        *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
+        __genxml_cmd_pack(cmd)(NULL, (void *)dst, &name),         \
+        _dst = NULL)
+
+#define iris_pack_state(cmd, dst, name)                           \
+   for (struct cmd name = {},                                     \
+        *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
+        __genxml_cmd_pack(cmd)(NULL, (void *)_dst, &name),        \
+        _dst = NULL)
+
+#include "genxml/genX_pack.h"
+#include "genxml/gen_macros.h"
+
+static void
+iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
+{
+#if 0
+   L3 configuration
+
+   3DSTATE_VIEWPORT_STATE_POINTERS_CC - CC_VIEWPORT
+     -> from iris_depth_stencil_alpha_state
+
+   3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL - SF_CLIP_VIEWPORT
+     -> pipe_viewport_state for matrix elements, guardband is calculated
+        from those.  can calculate screen space from matrix apparently...
+
+   3DSTATE_SCISSOR_STATE_POINTERS - SCISSOR_RECT
+     -> from ice->state.scissors
+
+   3DSTATE_PUSH_CONSTANT_ALLOC_*
+   3DSTATE_URB_*
+     -> TODO
+
+   3DSTATE_PS_BLEND
+   3DSTATE_BLEND_STATE_POINTERS - BLEND_STATE
+     -> from iris_blend_state (most) + iris_depth_stencil_alpha_state
+        (alpha test function/enable) + has writeable RT from ???????
+
+   3DSTATE_CC_STATE_POINTERS - COLOR_CALC_STATE
+     -> from ice->state.blend_color + iris_depth_stencil_alpha_state
+        (ref_value)
+
+   3DSTATE_CONSTANT_* - push constants
+     -> TODO
+
+   Surfaces:
+   - pull constants
+   - ubos/ssbos/abos
+   - images
+   - textures
+   - render targets - write and read
+   3DSTATE_BINDING_TABLE_POINTERS_*
+     -> TODO
+
+   3DSTATE_SAMPLER_STATE_POINTERS_*
+     -> TODO
+
+   3DSTATE_MULTISAMPLE
+   3DSTATE_SAMPLE_MASK
+
+   3DSTATE_VS
+   3DSTATE_HS
+   3DSTATE_TE
+   3DSTATE_DS
+   3DSTATE_GS
+   3DSTATE_PS_EXTRA
+   3DSTATE_PS
+   3DSTATE_STREAMOUT
+   3DSTATE_SO_BUFFER
+   3DSTATE_SO_DECL_LIST
+
+   3DSTATE_CLIP
+     -> iris_raster_state + ??? (Non-perspective Bary, ForceZeroRTAIndex)
+
+   3DSTATE_RASTER
+   3DSTATE_SF
+     -> iris_raster_state
+
+   3DSTATE_WM
+     -> iris_raster_state + FS state (barycentric, EDSC)
+   3DSTATE_SBE
+     -> iris_raster_state (point sprite texture coordinate origin)
+     -> bunch of shader state...
+   3DSTATE_SBE_SWIZ
+     -> FS state
+
+   3DSTATE_DEPTH_BUFFER
+   3DSTATE_HIER_DEPTH_BUFFER
+   3DSTATE_STENCIL_BUFFER
+   3DSTATE_CLEAR_PARAMS
+     -> iris_framebuffer_state?
+
+   3DSTATE_VF_TOPOLOGY
+     -> pipe_draw_info (prim_mode)
+   3DSTATE_VF
+     -> pipe_draw_info (restart_index, primitive_restart)
+
+   3DSTATE_INDEX_BUFFER
+     -> pipe_draw_info (index)
+   3DSTATE_VERTEX_BUFFERS
+     -> pipe_vertex_buffer (set_vertex_buffer hook)
+   3DSTATE_VERTEX_ELEMENTS
+     -> iris_vertex_element
+   3DSTATE_VF_INSTANCING
+     -> iris_vertex_element
+   3DSTATE_VF_SGVS
+     -> TODO ???
+   3DSTATE_VF_COMPONENT_PACKING
+     -> TODO ???
+
+   3DPRIMITIVE
+     -> pipe_draw_info
+
+   rare:
+   3DSTATE_POLY_STIPPLE_OFFSET
+   3DSTATE_POLY_STIPPLE_PATTERN
+     -> ice->state.poly_stipple
+   3DSTATE_LINE_STIPPLE
+     -> iris_raster_state
+
+   once:
+   3DSTATE_AA_LINE_PARAMETERS
+   3DSTATE_WM_CHROMAKEY
+   3DSTATE_SAMPLE_PATTERN
+   3DSTATE_DRAWING_RECTANGLE
+   3DSTATE_WM_HZ_OP
+#endif
+}
diff --git a/src/gallium/drivers/iris/iris_formats.c b/src/gallium/drivers/iris/iris_formats.c
new file mode 100644 (file)
index 0000000..54a638a
--- /dev/null
@@ -0,0 +1,474 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "util/u_format.h"
+#include "util/macros.h"
+
+#include "iris_resource.h"
+#include "iris_screen.h"
+
+enum isl_format
+iris_isl_format_for_pipe_format(enum pipe_format pf)
+{
+   static const enum isl_format table[PIPE_FORMAT_COUNT] = {
+      [0 ... PIPE_FORMAT_COUNT-1] = ISL_FORMAT_UNSUPPORTED,
+
+      [PIPE_FORMAT_B8G8R8A8_UNORM]          = ISL_FORMAT_B8G8R8A8_UNORM,
+      [PIPE_FORMAT_B8G8R8X8_UNORM]          = ISL_FORMAT_B8G8R8X8_UNORM,
+      //[PIPE_FORMAT_A8R8G8B8_UNORM]          = ISL_FORMAT_A8R8G8B8_UNORM,
+      //[PIPE_FORMAT_X8R8G8B8_UNORM]          = ISL_FORMAT_X8R8G8B8_UNORM,
+      [PIPE_FORMAT_B5G5R5A1_UNORM]          = ISL_FORMAT_B5G5R5A1_UNORM,
+      [PIPE_FORMAT_B4G4R4A4_UNORM]          = ISL_FORMAT_B4G4R4A4_UNORM,
+      [PIPE_FORMAT_B5G6R5_UNORM]            = ISL_FORMAT_B5G6R5_UNORM,
+      [PIPE_FORMAT_R10G10B10A2_UNORM]       = ISL_FORMAT_R10G10B10A2_UNORM,
+      [PIPE_FORMAT_L8_UNORM]                = ISL_FORMAT_L8_UNORM,
+      [PIPE_FORMAT_A8_UNORM]                = ISL_FORMAT_A8_UNORM,
+      [PIPE_FORMAT_I8_UNORM]                = ISL_FORMAT_I8_UNORM,
+      [PIPE_FORMAT_L8A8_UNORM]              = ISL_FORMAT_L8A8_UNORM,
+      [PIPE_FORMAT_L16_UNORM]               = ISL_FORMAT_L16_UNORM,
+      //[PIPE_FORMAT_UYVY]                    = ISL_FORMAT_UYVY,
+      //[PIPE_FORMAT_YUYV]                    = ISL_FORMAT_YUYV,
+      [PIPE_FORMAT_Z16_UNORM]               = ISL_FORMAT_R16_UNORM,
+      [PIPE_FORMAT_Z32_UNORM]               = ISL_FORMAT_R32_UNORM,
+      [PIPE_FORMAT_Z32_FLOAT]               = ISL_FORMAT_R32_FLOAT,
+      //[PIPE_FORMAT_Z24_UNORM_S8_UINT]       = ISL_FORMAT_R24_UNORM_S8_UINT,
+      //[PIPE_FORMAT_S8_UINT_Z24_UNORM]       = ISL_FORMAT_S8_UINT_Z24_UNORM,
+      [PIPE_FORMAT_Z24X8_UNORM]             = ISL_FORMAT_R24_UNORM_X8_TYPELESS,
+      //[PIPE_FORMAT_X8Z24_UNORM]             = ISL_FORMAT_R24_UNORM_X8_TYPELESS,
+      [PIPE_FORMAT_S8_UINT]                 = ISL_FORMAT_R8_UINT,
+      [PIPE_FORMAT_R64_FLOAT]               = ISL_FORMAT_R64_FLOAT,
+      [PIPE_FORMAT_R64G64_FLOAT]            = ISL_FORMAT_R64G64_FLOAT,
+      [PIPE_FORMAT_R64G64B64_FLOAT]         = ISL_FORMAT_R64G64B64_FLOAT,
+      [PIPE_FORMAT_R64G64B64A64_FLOAT]      = ISL_FORMAT_R64G64B64A64_FLOAT,
+      [PIPE_FORMAT_R32_FLOAT]               = ISL_FORMAT_R32_FLOAT,
+      [PIPE_FORMAT_R32G32_FLOAT]            = ISL_FORMAT_R32G32_FLOAT,
+      [PIPE_FORMAT_R32G32B32_FLOAT]         = ISL_FORMAT_R32G32B32_FLOAT,
+      [PIPE_FORMAT_R32G32B32A32_FLOAT]      = ISL_FORMAT_R32G32B32A32_FLOAT,
+      [PIPE_FORMAT_R32_UNORM]               = ISL_FORMAT_R32_UNORM,
+      [PIPE_FORMAT_R32G32_UNORM]            = ISL_FORMAT_R32G32_UNORM,
+      [PIPE_FORMAT_R32G32B32_UNORM]         = ISL_FORMAT_R32G32B32_UNORM,
+      [PIPE_FORMAT_R32G32B32A32_UNORM]      = ISL_FORMAT_R32G32B32A32_UNORM,
+      [PIPE_FORMAT_R32_USCALED]             = ISL_FORMAT_R32_USCALED,
+      [PIPE_FORMAT_R32G32_USCALED]          = ISL_FORMAT_R32G32_USCALED,
+      [PIPE_FORMAT_R32G32B32_USCALED]       = ISL_FORMAT_R32G32B32_USCALED,
+      [PIPE_FORMAT_R32G32B32A32_USCALED]    = ISL_FORMAT_R32G32B32A32_USCALED,
+      [PIPE_FORMAT_R32_SNORM]               = ISL_FORMAT_R32_SNORM,
+      [PIPE_FORMAT_R32G32_SNORM]            = ISL_FORMAT_R32G32_SNORM,
+      [PIPE_FORMAT_R32G32B32_SNORM]         = ISL_FORMAT_R32G32B32_SNORM,
+      [PIPE_FORMAT_R32G32B32A32_SNORM]      = ISL_FORMAT_R32G32B32A32_SNORM,
+      [PIPE_FORMAT_R32_SSCALED]             = ISL_FORMAT_R32_SSCALED,
+      [PIPE_FORMAT_R32G32_SSCALED]          = ISL_FORMAT_R32G32_SSCALED,
+      [PIPE_FORMAT_R32G32B32_SSCALED]       = ISL_FORMAT_R32G32B32_SSCALED,
+      [PIPE_FORMAT_R32G32B32A32_SSCALED]    = ISL_FORMAT_R32G32B32A32_SSCALED,
+      [PIPE_FORMAT_R16_UNORM]               = ISL_FORMAT_R16_UNORM,
+      [PIPE_FORMAT_R16G16_UNORM]            = ISL_FORMAT_R16G16_UNORM,
+      [PIPE_FORMAT_R16G16B16_UNORM]         = ISL_FORMAT_R16G16B16_UNORM,
+      [PIPE_FORMAT_R16G16B16A16_UNORM]      = ISL_FORMAT_R16G16B16A16_UNORM,
+      [PIPE_FORMAT_R16_USCALED]             = ISL_FORMAT_R16_USCALED,
+      [PIPE_FORMAT_R16G16_USCALED]          = ISL_FORMAT_R16G16_USCALED,
+      [PIPE_FORMAT_R16G16B16_USCALED]       = ISL_FORMAT_R16G16B16_USCALED,
+      [PIPE_FORMAT_R16G16B16A16_USCALED]    = ISL_FORMAT_R16G16B16A16_USCALED,
+      [PIPE_FORMAT_R16_SNORM]               = ISL_FORMAT_R16_SNORM,
+      [PIPE_FORMAT_R16G16_SNORM]            = ISL_FORMAT_R16G16_SNORM,
+      [PIPE_FORMAT_R16G16B16_SNORM]         = ISL_FORMAT_R16G16B16_SNORM,
+      [PIPE_FORMAT_R16G16B16A16_SNORM]      = ISL_FORMAT_R16G16B16A16_SNORM,
+      [PIPE_FORMAT_R16_SSCALED]             = ISL_FORMAT_R16_SSCALED,
+      [PIPE_FORMAT_R16G16_SSCALED]          = ISL_FORMAT_R16G16_SSCALED,
+      [PIPE_FORMAT_R16G16B16_SSCALED]       = ISL_FORMAT_R16G16B16_SSCALED,
+      [PIPE_FORMAT_R16G16B16A16_SSCALED]    = ISL_FORMAT_R16G16B16A16_SSCALED,
+      [PIPE_FORMAT_R8_UNORM]                = ISL_FORMAT_R8_UNORM,
+      [PIPE_FORMAT_R8G8_UNORM]              = ISL_FORMAT_R8G8_UNORM,
+      [PIPE_FORMAT_R8G8B8_UNORM]            = ISL_FORMAT_R8G8B8_UNORM,
+      [PIPE_FORMAT_R8G8B8A8_UNORM]          = ISL_FORMAT_R8G8B8A8_UNORM,
+      //[PIPE_FORMAT_X8B8G8R8_UNORM]          = ISL_FORMAT_X8B8G8R8_UNORM,
+      [PIPE_FORMAT_R8_USCALED]              = ISL_FORMAT_R8_USCALED,
+      [PIPE_FORMAT_R8G8_USCALED]            = ISL_FORMAT_R8G8_USCALED,
+      [PIPE_FORMAT_R8G8B8_USCALED]          = ISL_FORMAT_R8G8B8_USCALED,
+      [PIPE_FORMAT_R8G8B8A8_USCALED]        = ISL_FORMAT_R8G8B8A8_USCALED,
+      [PIPE_FORMAT_R8_SNORM]                = ISL_FORMAT_R8_SNORM,
+      [PIPE_FORMAT_R8G8_SNORM]              = ISL_FORMAT_R8G8_SNORM,
+      [PIPE_FORMAT_R8G8B8_SNORM]            = ISL_FORMAT_R8G8B8_SNORM,
+      [PIPE_FORMAT_R8G8B8A8_SNORM]          = ISL_FORMAT_R8G8B8A8_SNORM,
+      [PIPE_FORMAT_R8_SSCALED]              = ISL_FORMAT_R8_SSCALED,
+      [PIPE_FORMAT_R8G8_SSCALED]            = ISL_FORMAT_R8G8_SSCALED,
+      [PIPE_FORMAT_R8G8B8_SSCALED]          = ISL_FORMAT_R8G8B8_SSCALED,
+      [PIPE_FORMAT_R8G8B8A8_SSCALED]        = ISL_FORMAT_R8G8B8A8_SSCALED,
+      [PIPE_FORMAT_R32_FIXED]               = ISL_FORMAT_R32_SFIXED,
+      [PIPE_FORMAT_R32G32_FIXED]            = ISL_FORMAT_R32G32_SFIXED,
+      [PIPE_FORMAT_R32G32B32_FIXED]         = ISL_FORMAT_R32G32B32_SFIXED,
+      [PIPE_FORMAT_R32G32B32A32_FIXED]      = ISL_FORMAT_R32G32B32A32_SFIXED,
+      [PIPE_FORMAT_R16_FLOAT]               = ISL_FORMAT_R16_FLOAT,
+      [PIPE_FORMAT_R16G16_FLOAT]            = ISL_FORMAT_R16G16_FLOAT,
+      [PIPE_FORMAT_R16G16B16_FLOAT]         = ISL_FORMAT_R16G16B16_FLOAT,
+      [PIPE_FORMAT_R16G16B16A16_FLOAT]      = ISL_FORMAT_R16G16B16A16_FLOAT,
+
+      [PIPE_FORMAT_L8_SRGB]                 = ISL_FORMAT_L8_UNORM_SRGB,
+      [PIPE_FORMAT_L8A8_SRGB]               = ISL_FORMAT_L8A8_UNORM_SRGB,
+      //[PIPE_FORMAT_R8G8B8_SRGB]             = ISL_FORMAT_R8G8B8_UNORM_SRGB,
+      //[PIPE_FORMAT_A8B8G8R8_SRGB]           = ISL_FORMAT_A8B8G8R8_UNORM_SRGB,
+      //[PIPE_FORMAT_X8B8G8R8_SRGB]           = ISL_FORMAT_X8B8G8R8_UNORM_SRGB,
+      [PIPE_FORMAT_B8G8R8A8_SRGB]           = ISL_FORMAT_B8G8R8A8_UNORM_SRGB,
+      [PIPE_FORMAT_B8G8R8X8_SRGB]           = ISL_FORMAT_B8G8R8X8_UNORM_SRGB,
+      //[PIPE_FORMAT_A8R8G8B8_SRGB]           = ISL_FORMAT_A8R8G8B8_UNORM_SRGB,
+      //[PIPE_FORMAT_X8R8G8B8_SRGB]           = ISL_FORMAT_X8R8G8B8_UNORM_SRGB,
+      [PIPE_FORMAT_R8G8B8A8_SRGB]           = ISL_FORMAT_R8G8B8A8_UNORM_SRGB,
+
+      [PIPE_FORMAT_DXT1_RGB]                = ISL_FORMAT_BC1_UNORM,
+      [PIPE_FORMAT_DXT1_RGBA]               = ISL_FORMAT_BC1_UNORM,
+      [PIPE_FORMAT_DXT3_RGBA]               = ISL_FORMAT_BC2_UNORM,
+      [PIPE_FORMAT_DXT5_RGBA]               = ISL_FORMAT_BC3_UNORM,
+
+      [PIPE_FORMAT_DXT1_SRGB]               = ISL_FORMAT_BC1_UNORM_SRGB,
+      [PIPE_FORMAT_DXT1_SRGBA]              = ISL_FORMAT_BC1_UNORM_SRGB,
+      [PIPE_FORMAT_DXT3_SRGBA]              = ISL_FORMAT_BC2_UNORM_SRGB,
+      [PIPE_FORMAT_DXT5_SRGBA]              = ISL_FORMAT_BC3_UNORM_SRGB,
+
+      [PIPE_FORMAT_RGTC1_UNORM]             = ISL_FORMAT_BC4_UNORM,
+      [PIPE_FORMAT_RGTC1_SNORM]             = ISL_FORMAT_BC4_SNORM,
+      [PIPE_FORMAT_RGTC2_UNORM]             = ISL_FORMAT_BC5_UNORM,
+      [PIPE_FORMAT_RGTC2_SNORM]             = ISL_FORMAT_BC5_SNORM,
+
+      //[PIPE_FORMAT_R8G8_B8G8_UNORM]         = ISL_FORMAT_R8G8_B8G8_UNORM,
+      //[PIPE_FORMAT_G8R8_G8B8_UNORM]         = ISL_FORMAT_G8R8_G8B8_UNORM,
+
+      //[PIPE_FORMAT_R8SG8SB8UX8U_NORM]       = ISL_FORMAT_R8SG8SB8UX8U_NORM,
+      //[PIPE_FORMAT_R5SG5SB6U_NORM]          = ISL_FORMAT_R5SG5SB6U_NORM,
+
+      //[PIPE_FORMAT_A8B8G8R8_UNORM]          = ISL_FORMAT_A8B8G8R8_UNORM,
+      [PIPE_FORMAT_B5G5R5X1_UNORM]          = ISL_FORMAT_B5G5R5X1_UNORM,
+      [PIPE_FORMAT_R10G10B10A2_USCALED]     = ISL_FORMAT_R10G10B10A2_USCALED,
+      [PIPE_FORMAT_R11G11B10_FLOAT]         = ISL_FORMAT_R11G11B10_FLOAT,
+      [PIPE_FORMAT_R9G9B9E5_FLOAT]          = ISL_FORMAT_R9G9B9E5_SHAREDEXP,
+      //[PIPE_FORMAT_Z32_FLOAT_S8X24_UINT]    = ISL_FORMAT_R32_FLOAT_S8X24_UINT,
+      [PIPE_FORMAT_R1_UNORM]                = ISL_FORMAT_R1_UNORM,
+      [PIPE_FORMAT_R10G10B10X2_USCALED]     = ISL_FORMAT_R10G10B10X2_USCALED,
+      //[PIPE_FORMAT_R10G10B10X2_SNORM]       = ISL_FORMAT_R10G10B10X2_SNORM,
+      //[PIPE_FORMAT_L4A4_UNORM]              = ISL_FORMAT_R4G4_UNORM,
+      [PIPE_FORMAT_B10G10R10A2_UNORM]       = ISL_FORMAT_B10G10R10A2_UNORM,
+      //[PIPE_FORMAT_R10SG10SB10SA2U_NORM]    = ISL_FORMAT_R10SG10SB10SA2U_NORM,
+      //[PIPE_FORMAT_R8G8Bx_SNORM]            = ISL_FORMAT_R8G8Bx_SNORM,
+      [PIPE_FORMAT_R8G8B8X8_UNORM]          = ISL_FORMAT_R8G8B8X8_UNORM,
+      //[PIPE_FORMAT_B4G4R4X4_UNORM]          = ISL_FORMAT_B4G4R4X4_UNORM,
+
+      /* some stencil sampler formats */
+      //[PIPE_FORMAT_X24S8_UINT]              = ISL_FORMAT_X24S8_UINT,
+      //[PIPE_FORMAT_S8X24_UINT]              = ISL_FORMAT_S8X24_UINT,
+      //[PIPE_FORMAT_X32_S8X24_UINT]          = ISL_FORMAT_X32_S8X24_UINT,
+
+      //[PIPE_FORMAT_B2G3R3_UNORM]            = ISL_FORMAT_B2G3R3_UNORM,
+      [PIPE_FORMAT_L16A16_UNORM]            = ISL_FORMAT_R16G16_UNORM,
+      [PIPE_FORMAT_A16_UNORM]               = ISL_FORMAT_R16_UNORM,
+      [PIPE_FORMAT_I16_UNORM]               = ISL_FORMAT_R16_UNORM,
+
+      //[PIPE_FORMAT_LATC1_UNORM]             = ISL_FORMAT_LATC1_UNORM,
+      //[PIPE_FORMAT_LATC1_SNORM]             = ISL_FORMAT_LATC1_SNORM,
+      //[PIPE_FORMAT_LATC2_UNORM]             = ISL_FORMAT_LATC2_UNORM,
+      //[PIPE_FORMAT_LATC2_SNORM]             = ISL_FORMAT_LATC2_SNORM,
+
+      [PIPE_FORMAT_A8_SNORM]                = ISL_FORMAT_R8_SNORM,
+      [PIPE_FORMAT_L8_SNORM]                = ISL_FORMAT_R8_SNORM,
+      [PIPE_FORMAT_L8A8_SNORM]              = ISL_FORMAT_R8G8_SNORM,
+      [PIPE_FORMAT_I8_SNORM]                = ISL_FORMAT_R8_SNORM,
+      [PIPE_FORMAT_A16_SNORM]               = ISL_FORMAT_R16_SNORM,
+      [PIPE_FORMAT_L16_SNORM]               = ISL_FORMAT_R16_SNORM,
+      [PIPE_FORMAT_L16A16_SNORM]            = ISL_FORMAT_R16G16_SNORM,
+      [PIPE_FORMAT_I16_SNORM]               = ISL_FORMAT_R16_SNORM,
+
+      [PIPE_FORMAT_A16_FLOAT]               = ISL_FORMAT_R16_FLOAT,
+      [PIPE_FORMAT_L16_FLOAT]               = ISL_FORMAT_R16_FLOAT,
+      [PIPE_FORMAT_L16A16_FLOAT]            = ISL_FORMAT_R16G16_FLOAT,
+      [PIPE_FORMAT_I16_FLOAT]               = ISL_FORMAT_R16_FLOAT,
+      [PIPE_FORMAT_A32_FLOAT]               = ISL_FORMAT_R32_FLOAT,
+      [PIPE_FORMAT_L32_FLOAT]               = ISL_FORMAT_R32_FLOAT,
+      [PIPE_FORMAT_L32A32_FLOAT]            = ISL_FORMAT_R32G32_FLOAT,
+      [PIPE_FORMAT_I32_FLOAT]               = ISL_FORMAT_R32_FLOAT,
+
+      //[PIPE_FORMAT_YV12]                    = ISL_FORMAT_YV12,
+      //[PIPE_FORMAT_YV16]                    = ISL_FORMAT_YV16,
+      //[PIPE_FORMAT_IYUV]                    = ISL_FORMAT_IYUV,
+      //[PIPE_FORMAT_NV12]                    = ISL_FORMAT_NV12,
+      //[PIPE_FORMAT_NV21]                    = ISL_FORMAT_NV21,
+
+      //[PIPE_FORMAT_A4R4_UNORM]              = ISL_FORMAT_A4R4_UNORM,
+      //[PIPE_FORMAT_R4A4_UNORM]              = ISL_FORMAT_R4A4_UNORM,
+      //[PIPE_FORMAT_R8A8_UNORM]              = ISL_FORMAT_R8A8_UNORM,
+      //[PIPE_FORMAT_A8R8_UNORM]              = ISL_FORMAT_A8R8_UNORM,
+
+      [PIPE_FORMAT_R10G10B10A2_SSCALED]     = ISL_FORMAT_R10G10B10A2_SSCALED,
+      [PIPE_FORMAT_R10G10B10A2_SNORM]       = ISL_FORMAT_R10G10B10A2_SNORM,
+
+      [PIPE_FORMAT_B10G10R10A2_USCALED]     = ISL_FORMAT_B10G10R10A2_USCALED,
+      [PIPE_FORMAT_B10G10R10A2_SSCALED]     = ISL_FORMAT_B10G10R10A2_SSCALED,
+      [PIPE_FORMAT_B10G10R10A2_SNORM]       = ISL_FORMAT_B10G10R10A2_SNORM,
+
+      [PIPE_FORMAT_R8_UINT]                 = ISL_FORMAT_R8_UINT,
+      [PIPE_FORMAT_R8G8_UINT]               = ISL_FORMAT_R8G8_UINT,
+      [PIPE_FORMAT_R8G8B8_UINT]             = ISL_FORMAT_R8G8B8_UINT,
+      [PIPE_FORMAT_R8G8B8A8_UINT]           = ISL_FORMAT_R8G8B8A8_UINT,
+
+      [PIPE_FORMAT_R8_SINT]                 = ISL_FORMAT_R8_SINT,
+      [PIPE_FORMAT_R8G8_SINT]               = ISL_FORMAT_R8G8_SINT,
+      [PIPE_FORMAT_R8G8B8_SINT]             = ISL_FORMAT_R8G8B8_SINT,
+      [PIPE_FORMAT_R8G8B8A8_SINT]           = ISL_FORMAT_R8G8B8A8_SINT,
+
+      [PIPE_FORMAT_R16_UINT]                = ISL_FORMAT_R16_UINT,
+      [PIPE_FORMAT_R16G16_UINT]             = ISL_FORMAT_R16G16_UINT,
+      [PIPE_FORMAT_R16G16B16_UINT]          = ISL_FORMAT_R16G16B16_UINT,
+      [PIPE_FORMAT_R16G16B16A16_UINT]       = ISL_FORMAT_R16G16B16A16_UINT,
+
+      [PIPE_FORMAT_R16_SINT]                = ISL_FORMAT_R16_SINT,
+      [PIPE_FORMAT_R16G16_SINT]             = ISL_FORMAT_R16G16_SINT,
+      [PIPE_FORMAT_R16G16B16_SINT]          = ISL_FORMAT_R16G16B16_SINT,
+      [PIPE_FORMAT_R16G16B16A16_SINT]       = ISL_FORMAT_R16G16B16A16_SINT,
+
+      [PIPE_FORMAT_R32_UINT]                = ISL_FORMAT_R32_UINT,
+      [PIPE_FORMAT_R32G32_UINT]             = ISL_FORMAT_R32G32_UINT,
+      [PIPE_FORMAT_R32G32B32_UINT]          = ISL_FORMAT_R32G32B32_UINT,
+      [PIPE_FORMAT_R32G32B32A32_UINT]       = ISL_FORMAT_R32G32B32A32_UINT,
+
+      [PIPE_FORMAT_R32_SINT]                = ISL_FORMAT_R32_SINT,
+      [PIPE_FORMAT_R32G32_SINT]             = ISL_FORMAT_R32G32_SINT,
+      [PIPE_FORMAT_R32G32B32_SINT]          = ISL_FORMAT_R32G32B32_SINT,
+      [PIPE_FORMAT_R32G32B32A32_SINT]       = ISL_FORMAT_R32G32B32A32_SINT,
+
+      [PIPE_FORMAT_A8_UINT]                 = ISL_FORMAT_R8_UINT,
+      [PIPE_FORMAT_I8_UINT]                 = ISL_FORMAT_R8_UINT,
+      [PIPE_FORMAT_L8_UINT]                 = ISL_FORMAT_R8_UINT,
+      [PIPE_FORMAT_L8A8_UINT]               = ISL_FORMAT_R8G8_UINT,
+
+      [PIPE_FORMAT_A8_SINT]                 = ISL_FORMAT_R8_SINT,
+      [PIPE_FORMAT_I8_SINT]                 = ISL_FORMAT_R8_SINT,
+      [PIPE_FORMAT_L8_SINT]                 = ISL_FORMAT_R8_SINT,
+      [PIPE_FORMAT_L8A8_SINT]               = ISL_FORMAT_R8G8_SINT,
+
+      [PIPE_FORMAT_A16_UINT]                = ISL_FORMAT_R16_UINT,
+      [PIPE_FORMAT_I16_UINT]                = ISL_FORMAT_R16_UINT,
+      [PIPE_FORMAT_L16_UINT]                = ISL_FORMAT_R16_UINT,
+      [PIPE_FORMAT_L16A16_UINT]             = ISL_FORMAT_R16G16_UINT,
+
+      [PIPE_FORMAT_A16_SINT]                = ISL_FORMAT_R16_SINT,
+      [PIPE_FORMAT_I16_SINT]                = ISL_FORMAT_R16_SINT,
+      [PIPE_FORMAT_L16_SINT]                = ISL_FORMAT_R16_SINT,
+      [PIPE_FORMAT_L16A16_SINT]             = ISL_FORMAT_R16G16_SINT,
+
+      [PIPE_FORMAT_A32_UINT]                = ISL_FORMAT_R32_UINT,
+      [PIPE_FORMAT_I32_UINT]                = ISL_FORMAT_R32_UINT,
+      [PIPE_FORMAT_L32_UINT]                = ISL_FORMAT_R32_UINT,
+      [PIPE_FORMAT_L32A32_UINT]             = ISL_FORMAT_R32G32_UINT,
+
+      [PIPE_FORMAT_A32_SINT]                = ISL_FORMAT_R32_SINT,
+      [PIPE_FORMAT_I32_SINT]                = ISL_FORMAT_R32_SINT,
+      [PIPE_FORMAT_L32_SINT]                = ISL_FORMAT_R32_SINT,
+      [PIPE_FORMAT_L32A32_SINT]             = ISL_FORMAT_R32G32_SINT,
+
+      [PIPE_FORMAT_B10G10R10A2_UINT]        = ISL_FORMAT_B10G10R10A2_UINT,
+
+      [PIPE_FORMAT_ETC1_RGB8]               = ISL_FORMAT_ETC1_RGB8,
+
+      //[PIPE_FORMAT_R8G8_R8B8_UNORM]         = ISL_FORMAT_R8G8_R8B8_UNORM,
+      //[PIPE_FORMAT_G8R8_B8R8_UNORM]         = ISL_FORMAT_G8R8_B8R8_UNORM,
+
+      //[PIPE_FORMAT_R8G8B8X8_SNORM]          = ISL_FORMAT_R8G8B8X8_SNORM,
+      [PIPE_FORMAT_R8G8B8X8_SRGB]           = ISL_FORMAT_R8G8B8X8_UNORM_SRGB,
+      //[PIPE_FORMAT_R8G8B8X8_UINT]           = ISL_FORMAT_R8G8B8X8_UINT,
+      //[PIPE_FORMAT_R8G8B8X8_SINT]           = ISL_FORMAT_R8G8B8X8_SINT,
+      [PIPE_FORMAT_B10G10R10X2_UNORM]       = ISL_FORMAT_B10G10R10X2_UNORM,
+      [PIPE_FORMAT_R16G16B16X16_UNORM]      = ISL_FORMAT_R16G16B16X16_UNORM,
+      //[PIPE_FORMAT_R16G16B16X16_SNORM]      = ISL_FORMAT_R16G16B16X16_SNORM,
+      [PIPE_FORMAT_R16G16B16X16_FLOAT]      = ISL_FORMAT_R16G16B16X16_FLOAT,
+      //[PIPE_FORMAT_R16G16B16X16_UINT]       = ISL_FORMAT_R16G16B16X16_UINT,
+      //[PIPE_FORMAT_R16G16B16X16_SINT]       = ISL_FORMAT_R16G16B16X16_SINT,
+      [PIPE_FORMAT_R32G32B32X32_FLOAT]      = ISL_FORMAT_R32G32B32X32_FLOAT,
+      //[PIPE_FORMAT_R32G32B32X32_UINT]       = ISL_FORMAT_R32G32B32X32_UINT,
+      //[PIPE_FORMAT_R32G32B32X32_SINT]       = ISL_FORMAT_R32G32B32X32_SINT,
+
+      //[PIPE_FORMAT_R8A8_SNORM]              = ISL_FORMAT_R8A8_SNORM,
+      //[PIPE_FORMAT_R16A16_UNORM]            = ISL_FORMAT_R16A16_UNORM,
+      //[PIPE_FORMAT_R16A16_SNORM]            = ISL_FORMAT_R16A16_SNORM,
+      //[PIPE_FORMAT_R16A16_FLOAT]            = ISL_FORMAT_R16A16_FLOAT,
+      //[PIPE_FORMAT_R32A32_FLOAT]            = ISL_FORMAT_R32A32_FLOAT,
+      //[PIPE_FORMAT_R8A8_UINT]               = ISL_FORMAT_R8A8_UINT,
+      //[PIPE_FORMAT_R8A8_SINT]               = ISL_FORMAT_R8A8_SINT,
+      //[PIPE_FORMAT_R16A16_UINT]             = ISL_FORMAT_R16A16_UINT,
+      //[PIPE_FORMAT_R16A16_SINT]             = ISL_FORMAT_R16A16_SINT,
+      //[PIPE_FORMAT_R32A32_UINT]             = ISL_FORMAT_R32A32_UINT,
+      //[PIPE_FORMAT_R32A32_SINT]             = ISL_FORMAT_R32A32_SINT,
+      [PIPE_FORMAT_R10G10B10A2_UINT]        = ISL_FORMAT_R10G10B10A2_UINT,
+
+      [PIPE_FORMAT_B5G6R5_SRGB]             = ISL_FORMAT_B5G6R5_UNORM_SRGB,
+
+      [PIPE_FORMAT_BPTC_RGBA_UNORM]         = ISL_FORMAT_BC7_UNORM,
+      [PIPE_FORMAT_BPTC_SRGBA]              = ISL_FORMAT_BC7_UNORM_SRGB,
+      [PIPE_FORMAT_BPTC_RGB_FLOAT]          = ISL_FORMAT_BC6H_SF16,
+      [PIPE_FORMAT_BPTC_RGB_UFLOAT]         = ISL_FORMAT_BC6H_UF16,
+
+      //[PIPE_FORMAT_A8L8_UNORM]              = ISL_FORMAT_A8L8_UNORM,
+      //[PIPE_FORMAT_A8L8_SNORM]              = ISL_FORMAT_A8L8_SNORM,
+      //[PIPE_FORMAT_A8L8_SRGB]               = ISL_FORMAT_A8L8_SRGB,
+      //[PIPE_FORMAT_A16L16_UNORM]            = ISL_FORMAT_A16L16_UNORM,
+
+      //[PIPE_FORMAT_G8R8_UNORM]              = ISL_FORMAT_G8R8_UNORM,
+      //[PIPE_FORMAT_G8R8_SNORM]              = ISL_FORMAT_G8R8_SNORM,
+      //[PIPE_FORMAT_G16R16_UNORM]            = ISL_FORMAT_G16R16_UNORM,
+      //[PIPE_FORMAT_G16R16_SNORM]            = ISL_FORMAT_G16R16_SNORM,
+
+      //[PIPE_FORMAT_A8B8G8R8_SNORM]          = ISL_FORMAT_A8B8G8R8_SNORM,
+      //[PIPE_FORMAT_X8B8G8R8_SNORM]          = ISL_FORMAT_X8B8G8R8_SNORM,
+
+      [PIPE_FORMAT_ETC2_RGB8]               = ISL_FORMAT_ETC2_RGB8,
+      [PIPE_FORMAT_ETC2_SRGB8]              = ISL_FORMAT_ETC2_SRGB8,
+      [PIPE_FORMAT_ETC2_RGB8A1]             = ISL_FORMAT_ETC2_RGB8_PTA,
+      [PIPE_FORMAT_ETC2_SRGB8A1]            = ISL_FORMAT_ETC2_SRGB8_PTA,
+      [PIPE_FORMAT_ETC2_RGBA8]              = ISL_FORMAT_ETC2_EAC_RGBA8,
+      [PIPE_FORMAT_ETC2_SRGBA8]             = ISL_FORMAT_ETC2_EAC_SRGB8_A8,
+      [PIPE_FORMAT_ETC2_R11_UNORM]          = ISL_FORMAT_EAC_R11,
+      [PIPE_FORMAT_ETC2_R11_SNORM]          = ISL_FORMAT_EAC_SIGNED_R11,
+      [PIPE_FORMAT_ETC2_RG11_UNORM]         = ISL_FORMAT_EAC_RG11,
+      [PIPE_FORMAT_ETC2_RG11_SNORM]         = ISL_FORMAT_EAC_SIGNED_RG11,
+
+
+      [PIPE_FORMAT_ASTC_4x4]                = ISL_FORMAT_ASTC_LDR_2D_4X4_FLT16,
+      [PIPE_FORMAT_ASTC_5x4]                = ISL_FORMAT_ASTC_LDR_2D_5X4_FLT16,
+      [PIPE_FORMAT_ASTC_5x5]                = ISL_FORMAT_ASTC_LDR_2D_5X5_FLT16,
+      [PIPE_FORMAT_ASTC_6x5]                = ISL_FORMAT_ASTC_LDR_2D_6X5_FLT16,
+      [PIPE_FORMAT_ASTC_6x6]                = ISL_FORMAT_ASTC_LDR_2D_6X6_FLT16,
+      [PIPE_FORMAT_ASTC_8x5]                = ISL_FORMAT_ASTC_LDR_2D_8X5_FLT16,
+      [PIPE_FORMAT_ASTC_8x6]                = ISL_FORMAT_ASTC_LDR_2D_8X6_FLT16,
+      [PIPE_FORMAT_ASTC_8x8]                = ISL_FORMAT_ASTC_LDR_2D_8X8_FLT16,
+      [PIPE_FORMAT_ASTC_10x5]               = ISL_FORMAT_ASTC_LDR_2D_10X5_FLT16,
+      [PIPE_FORMAT_ASTC_10x6]               = ISL_FORMAT_ASTC_LDR_2D_10X6_FLT16,
+      [PIPE_FORMAT_ASTC_10x8]               = ISL_FORMAT_ASTC_LDR_2D_10X8_FLT16,
+      [PIPE_FORMAT_ASTC_10x10]              = ISL_FORMAT_ASTC_LDR_2D_10X10_FLT16,
+      [PIPE_FORMAT_ASTC_12x10]              = ISL_FORMAT_ASTC_LDR_2D_12X10_FLT16,
+      [PIPE_FORMAT_ASTC_12x12]              = ISL_FORMAT_ASTC_LDR_2D_12X12_FLT16,
+
+      [PIPE_FORMAT_ASTC_4x4_SRGB]           = ISL_FORMAT_ASTC_LDR_2D_4X4_U8SRGB,
+      [PIPE_FORMAT_ASTC_5x4_SRGB]           = ISL_FORMAT_ASTC_LDR_2D_5X4_U8SRGB,
+      [PIPE_FORMAT_ASTC_5x5_SRGB]           = ISL_FORMAT_ASTC_LDR_2D_5X5_U8SRGB,
+      [PIPE_FORMAT_ASTC_6x5_SRGB]           = ISL_FORMAT_ASTC_LDR_2D_6X5_U8SRGB,
+      [PIPE_FORMAT_ASTC_6x6_SRGB]           = ISL_FORMAT_ASTC_LDR_2D_6X6_U8SRGB,
+      [PIPE_FORMAT_ASTC_8x5_SRGB]           = ISL_FORMAT_ASTC_LDR_2D_8X5_U8SRGB,
+      [PIPE_FORMAT_ASTC_8x6_SRGB]           = ISL_FORMAT_ASTC_LDR_2D_8X6_U8SRGB,
+      [PIPE_FORMAT_ASTC_8x8_SRGB]           = ISL_FORMAT_ASTC_LDR_2D_8X8_U8SRGB,
+      [PIPE_FORMAT_ASTC_10x5_SRGB]          = ISL_FORMAT_ASTC_LDR_2D_10X5_U8SRGB,
+      [PIPE_FORMAT_ASTC_10x6_SRGB]          = ISL_FORMAT_ASTC_LDR_2D_10X6_U8SRGB,
+      [PIPE_FORMAT_ASTC_10x8_SRGB]          = ISL_FORMAT_ASTC_LDR_2D_10X8_U8SRGB,
+      [PIPE_FORMAT_ASTC_10x10_SRGB]         = ISL_FORMAT_ASTC_LDR_2D_10X10_U8SRGB,
+      [PIPE_FORMAT_ASTC_12x10_SRGB]         = ISL_FORMAT_ASTC_LDR_2D_12X10_U8SRGB,
+      [PIPE_FORMAT_ASTC_12x12_SRGB]         = ISL_FORMAT_ASTC_LDR_2D_12X12_U8SRGB,
+
+      //[PIPE_FORMAT_P016]                    = ISL_FORMAT_P016,
+
+      //[PIPE_FORMAT_R10G10B10X2_UNORM]       = ISL_FORMAT_R10G10B10X2_UNORM,
+      //[PIPE_FORMAT_A1B5G5R5_UNORM]          = ISL_FORMAT_A1B5G5R5_UNORM,
+      //[PIPE_FORMAT_X1B5G5R5_UNORM]          = ISL_FORMAT_X1B5G5R5_UNORM,
+   };
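+   /* Entries not listed above are zero-initialized. Beware that in ISL,
+    * format 0 is ISL_FORMAT_R32G32B32A32_FLOAT, so an unmapped pipe
+    * format silently aliases to it rather than failing loudly.
+    */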
+   assert(pf < PIPE_FORMAT_COUNT);
+   return table[pf];
+}
+
+boolean
+iris_is_format_supported(struct pipe_screen *pscreen,
+                         enum pipe_format pformat,
+                         enum pipe_texture_target target,
+                         unsigned sample_count,
+                         unsigned storage_sample_count,
+                         unsigned usage)
+{
+   struct iris_screen *screen = (struct iris_screen *) pscreen;
+   const struct gen_device_info *devinfo = &screen->devinfo;
+
+   // XXX: no MSAA support yet, so reject it here (this makes the
+   // multisampling check below unreachable for now)
+   if (sample_count > 1)
+      return false;
+
+   bool supported = true;
+
+   enum isl_format format = iris_isl_format_for_pipe_format(pformat);
+
+   if (sample_count > 1)
+      supported &= isl_format_supports_multisampling(devinfo, format);
+
+   if (usage & PIPE_BIND_DEPTH_STENCIL) {
+      supported &= format == ISL_FORMAT_R32_FLOAT_X8X24_TYPELESS ||
+                   format == ISL_FORMAT_R32_FLOAT ||
+                   format == ISL_FORMAT_R24_UNORM_X8_TYPELESS ||
+                   format == ISL_FORMAT_R16_UNORM;
+   }
+
+   if (usage & PIPE_BIND_RENDER_TARGET)
+      supported &= isl_format_supports_rendering(devinfo, format);
+
+   if (usage & PIPE_BIND_SHADER_IMAGE) {
+      // XXX: allow untyped reads
+      supported &= isl_format_supports_typed_reads(devinfo, format) &&
+                   isl_format_supports_typed_writes(devinfo, format);
+   }
+
+   if (usage & PIPE_BIND_SAMPLER_VIEW)
+      supported &= isl_format_supports_sampling(devinfo, format);
+
+   if (usage & PIPE_BIND_VERTEX_BUFFER)
+      supported &= isl_format_supports_vertex_fetch(devinfo, format);
+
+   if (usage & PIPE_BIND_INDEX_BUFFER) {
+      supported &= format == ISL_FORMAT_R8_UINT ||
+                   format == ISL_FORMAT_R16_UINT ||
+                   format == ISL_FORMAT_R32_UINT;
+   }
+
+   if (usage & PIPE_BIND_CONSTANT_BUFFER) {
+      // XXX:
+   }
+
+   if (usage & PIPE_BIND_STREAM_OUTPUT) {
+      // XXX:
+   }
+
+   if (usage & PIPE_BIND_CURSOR) {
+      // XXX:
+   }
+
+   if (usage & PIPE_BIND_CUSTOM) {
+      // XXX:
+   }
+
+   if (usage & PIPE_BIND_SHADER_BUFFER) {
+      // XXX:
+   }
+
+   if (usage & PIPE_BIND_COMPUTE_RESOURCE) {
+      // XXX:
+   }
+
+   if (usage & PIPE_BIND_COMMAND_ARGS_BUFFER) {
+      // XXX:
+   }
+
+   if (usage & PIPE_BIND_QUERY_BUFFER) {
+      // XXX:
+   }
+
+   return supported;
+}
+
diff --git a/src/gallium/drivers/iris/iris_pipe.c b/src/gallium/drivers/iris/iris_pipe.c
new file mode 100644 (file)
index 0000000..a088da8
--- /dev/null
@@ -0,0 +1,309 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include <time.h>
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "util/u_inlines.h"
+#include "util/u_format.h"
+#include "util/u_upload_mgr.h"
+#include "util/ralloc.h"
+#include "iris_context.h"
+#include "iris_resource.h"
+#include "iris_screen.h"
+#include "intel/compiler/brw_compiler.h"
+
+/**
+ * For debugging purposes, this returns a time in seconds.
+ */
+double
+get_time(void)
+{
+   struct timespec tp;
+
+   clock_gettime(CLOCK_MONOTONIC, &tp);
+
+   return tp.tv_sec + tp.tv_nsec / 1000000000.0;
+}
+
+/*
+ * query
+ */
+struct iris_query {
+   unsigned query;
+};
+
+static struct pipe_query *
+iris_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
+{
+   struct iris_query *query = calloc(1, sizeof(struct iris_query));
+
+   return (struct pipe_query *)query;
+}
+
+static void
+iris_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
+{
+   free(query);
+}
+
+static boolean
+iris_begin_query(struct pipe_context *ctx, struct pipe_query *query)
+{
+   return true;
+}
+
+static bool
+iris_end_query(struct pipe_context *ctx, struct pipe_query *query)
+{
+   return true;
+}
+
+static boolean
+iris_get_query_result(struct pipe_context *ctx,
+                      struct pipe_query *query,
+                      boolean wait,
+                      union pipe_query_result *vresult)
+{
+   uint64_t *result = (uint64_t*)vresult;
+
+   *result = 0;
+   return TRUE;
+}
+
+static void
+iris_set_active_query_state(struct pipe_context *pipe, boolean enable)
+{
+}
+
+
+/*
+ * transfer
+ */
+static void *
+iris_transfer_map(struct pipe_context *pipe,
+                  struct pipe_resource *resource,
+                  unsigned level,
+                  enum pipe_transfer_usage usage,
+                  const struct pipe_box *box,
+                  struct pipe_transfer **ptransfer)
+{
+   struct pipe_transfer *transfer;
+   struct iris_resource *res = (struct iris_resource *)resource;
+
+   transfer = calloc(1, sizeof(struct pipe_transfer));
+   if (!transfer)
+      return NULL;
+   pipe_resource_reference(&transfer->resource, resource);
+   transfer->level = level;
+   transfer->usage = usage;
+   transfer->box = *box;
+   transfer->stride = 1;
+   transfer->layer_stride = 1;
+   *ptransfer = transfer;
+
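+   /* XXX: stub; nothing is actually mapped, so signal failure to the
+    * caller even though *ptransfer was filled in above.
+    */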
+   return NULL;
+}
+
+static void
+iris_transfer_flush_region(struct pipe_context *pipe,
+                           struct pipe_transfer *transfer,
+                           const struct pipe_box *box)
+{
+}
+
+static void
+iris_transfer_unmap(struct pipe_context *pipe,
+                    struct pipe_transfer *transfer)
+{
+   pipe_resource_reference(&transfer->resource, NULL);
+   free(transfer);
+}
+
+static void
+iris_buffer_subdata(struct pipe_context *pipe,
+                    struct pipe_resource *resource,
+                    unsigned usage, unsigned offset,
+                    unsigned size, const void *data)
+{
+}
+
+static void
+iris_texture_subdata(struct pipe_context *pipe,
+                     struct pipe_resource *resource,
+                     unsigned level,
+                     unsigned usage,
+                     const struct pipe_box *box,
+                     const void *data,
+                     unsigned stride,
+                     unsigned layer_stride)
+{
+}
+
+
+/*
+ * clear/copy
+ */
+static void
+iris_clear(struct pipe_context *ctx, unsigned buffers,
+           const union pipe_color_union *color, double depth, unsigned stencil)
+{
+}
+
+static void
+iris_clear_render_target(struct pipe_context *ctx,
+                         struct pipe_surface *dst,
+                         const union pipe_color_union *color,
+                         unsigned dstx, unsigned dsty,
+                         unsigned width, unsigned height,
+                         bool render_condition_enabled)
+{
+}
+
+static void
+iris_clear_depth_stencil(struct pipe_context *ctx,
+                         struct pipe_surface *dst,
+                         unsigned clear_flags,
+                         double depth,
+                         unsigned stencil,
+                         unsigned dstx, unsigned dsty,
+                         unsigned width, unsigned height,
+                         bool render_condition_enabled)
+{
+}
+
+static void
+iris_resource_copy_region(struct pipe_context *ctx,
+                          struct pipe_resource *dst,
+                          unsigned dst_level,
+                          unsigned dstx, unsigned dsty, unsigned dstz,
+                          struct pipe_resource *src,
+                          unsigned src_level,
+                          const struct pipe_box *src_box)
+{
+}
+
+static void
+iris_blit(struct pipe_context *ctx, const struct pipe_blit_info *info)
+{
+}
+
+
+static void
+iris_flush_resource(struct pipe_context *ctx, struct pipe_resource *resource)
+{
+}
+
+
+/*
+ * context
+ */
+static void
+iris_flush(struct pipe_context *ctx,
+           struct pipe_fence_handle **fence,
+           unsigned flags)
+{
+   if (fence)
+      *fence = NULL;
+}
+
+static void
+iris_destroy_context(struct pipe_context *ctx)
+{
+   if (ctx->stream_uploader)
+      u_upload_destroy(ctx->stream_uploader);
+
+   free(ctx);
+}
+
+static boolean
+iris_generate_mipmap(struct pipe_context *ctx,
+                     struct pipe_resource *resource,
+                     enum pipe_format format,
+                     unsigned base_level,
+                     unsigned last_level,
+                     unsigned first_layer,
+                     unsigned last_layer)
+{
+   return true;
+}
+
+static void
+iris_set_debug_callback(struct pipe_context *ctx,
+                        const struct pipe_debug_callback *cb)
+{
+   struct iris_context *ice = (struct iris_context *)ctx;
+
+   if (cb)
+      ice->dbg = *cb;
+   else
+      memset(&ice->dbg, 0, sizeof(ice->dbg));
+}
+
+struct pipe_context *
+iris_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
+{
+   struct iris_context *ice = calloc(1, sizeof(struct iris_context));
+
+   if (!ice)
+      return NULL;
+
+   struct pipe_context *ctx = &ice->ctx;
+
+   ctx->screen = screen;
+   ctx->priv = priv;
+
+   ctx->stream_uploader = u_upload_create_default(ctx);
+   if (!ctx->stream_uploader) {
+      free(ctx);
+      return NULL;
+   }
+   ctx->const_uploader = ctx->stream_uploader;
+
+   ctx->destroy = iris_destroy_context;
+   ctx->flush = iris_flush;
+   ctx->clear = iris_clear;
+   ctx->clear_render_target = iris_clear_render_target;
+   ctx->clear_depth_stencil = iris_clear_depth_stencil;
+   ctx->resource_copy_region = iris_resource_copy_region;
+   ctx->generate_mipmap = iris_generate_mipmap;
+   ctx->blit = iris_blit;
+   ctx->flush_resource = iris_flush_resource;
+   ctx->create_query = iris_create_query;
+   ctx->destroy_query = iris_destroy_query;
+   ctx->begin_query = iris_begin_query;
+   ctx->end_query = iris_end_query;
+   ctx->get_query_result = iris_get_query_result;
+   ctx->set_active_query_state = iris_set_active_query_state;
+   ctx->transfer_map = iris_transfer_map;
+   ctx->transfer_flush_region = iris_transfer_flush_region;
+   ctx->transfer_unmap = iris_transfer_unmap;
+   ctx->buffer_subdata = iris_buffer_subdata;
+   ctx->texture_subdata = iris_texture_subdata;
+   ctx->set_debug_callback = iris_set_debug_callback;
+   iris_init_program_functions(ctx);
+   iris_init_state_functions(ctx);
+
+   return ctx;
+}
diff --git a/src/gallium/drivers/iris/iris_program.c b/src/gallium/drivers/iris/iris_program.c
new file mode 100644 (file)
index 0000000..cc99431
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "util/u_atomic.h"
+#include "compiler/nir/nir.h"
+#include "compiler/nir/nir_builder.h"
+#include "intel/compiler/brw_compiler.h"
+#include "intel/compiler/brw_nir.h"
+#include "iris_context.h"
+
+static unsigned
+get_new_program_id(struct iris_screen *screen)
+{
+   return p_atomic_inc_return(&screen->program_id);
+}
+
+struct iris_uncompiled_shader {
+   struct pipe_shader_state base;
+   unsigned program_id;
+};
+
+static void *
+iris_create_shader_state(struct pipe_context *ctx,
+                         const struct pipe_shader_state *state)
+{
+   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
+
+   assert(state->type == PIPE_SHADER_IR_NIR);
+
+   nir_shader *nir = state->ir.nir;
+
+   struct iris_uncompiled_shader *cso =
+      calloc(1, sizeof(struct iris_uncompiled_shader));
+   if (!cso)
+      return NULL;
+
+   nir = brw_preprocess_nir(screen->compiler, nir);
+
+   cso->program_id = get_new_program_id(screen);
+   cso->base.type = PIPE_SHADER_IR_NIR;
+   cso->base.ir.nir = nir;
+
+   return cso;
+}
+
+static void
+iris_delete_shader_state(struct pipe_context *ctx, void *hwcso)
+{
+   struct iris_uncompiled_shader *cso = hwcso;
+
+   ralloc_free(cso->base.ir.nir);
+   free(cso);
+}
+
+void
+iris_init_program_functions(struct pipe_context *ctx)
+{
+   ctx->create_vs_state = iris_create_shader_state;
+   ctx->create_tcs_state = iris_create_shader_state;
+   ctx->create_tes_state = iris_create_shader_state;
+   ctx->create_gs_state = iris_create_shader_state;
+   ctx->create_fs_state = iris_create_shader_state;
+
+   ctx->delete_vs_state = iris_delete_shader_state;
+   ctx->delete_tcs_state = iris_delete_shader_state;
+   ctx->delete_tes_state = iris_delete_shader_state;
+   ctx->delete_gs_state = iris_delete_shader_state;
+   ctx->delete_fs_state = iris_delete_shader_state;
+}
diff --git a/src/gallium/drivers/iris/iris_resource.c b/src/gallium/drivers/iris/iris_resource.c
new file mode 100644 (file)
index 0000000..6598574
--- /dev/null
@@ -0,0 +1,368 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "util/u_inlines.h"
+#include "util/u_format.h"
+#include "util/u_upload_mgr.h"
+#include "util/ralloc.h"
+#include "iris_resource.h"
+#include "iris_screen.h"
+#include "intel/common/gen_debug.h"
+#include "drm-uapi/drm_fourcc.h"
+#include "drm-uapi/i915_drm.h"
+
+enum modifier_priority {
+   MODIFIER_PRIORITY_INVALID = 0,
+   MODIFIER_PRIORITY_LINEAR,
+   MODIFIER_PRIORITY_X,
+   MODIFIER_PRIORITY_Y,
+   MODIFIER_PRIORITY_Y_CCS,
+};
+
+static const uint64_t priority_to_modifier[] = {
+   [MODIFIER_PRIORITY_INVALID] = DRM_FORMAT_MOD_INVALID,
+   [MODIFIER_PRIORITY_LINEAR] = DRM_FORMAT_MOD_LINEAR,
+   [MODIFIER_PRIORITY_X] = I915_FORMAT_MOD_X_TILED,
+   [MODIFIER_PRIORITY_Y] = I915_FORMAT_MOD_Y_TILED,
+   [MODIFIER_PRIORITY_Y_CCS] = I915_FORMAT_MOD_Y_TILED_CCS,
+};
+
+static bool
+modifier_is_supported(const struct gen_device_info *devinfo,
+                      uint64_t modifier)
+{
+   /* XXX: do something real */
+   switch (modifier) {
+   case I915_FORMAT_MOD_Y_TILED:
+   case I915_FORMAT_MOD_X_TILED:
+   case DRM_FORMAT_MOD_LINEAR:
+      return true;
+   case I915_FORMAT_MOD_Y_TILED_CCS:
+   case DRM_FORMAT_MOD_INVALID:
+   default:
+      return false;
+   }
+}
+
+static uint64_t
+select_best_modifier(struct gen_device_info *devinfo,
+                     const uint64_t *modifiers,
+                     int count)
+{
+   enum modifier_priority prio = MODIFIER_PRIORITY_INVALID;
+
+   for (int i = 0; i < count; i++) {
+      if (!modifier_is_supported(devinfo, modifiers[i]))
+         continue;
+
+      switch (modifiers[i]) {
+      case I915_FORMAT_MOD_Y_TILED_CCS:
+         prio = MAX2(prio, MODIFIER_PRIORITY_Y_CCS);
+         break;
+      case I915_FORMAT_MOD_Y_TILED:
+         prio = MAX2(prio, MODIFIER_PRIORITY_Y);
+         break;
+      case I915_FORMAT_MOD_X_TILED:
+         prio = MAX2(prio, MODIFIER_PRIORITY_X);
+         break;
+      case DRM_FORMAT_MOD_LINEAR:
+         prio = MAX2(prio, MODIFIER_PRIORITY_LINEAR);
+         break;
+      case DRM_FORMAT_MOD_INVALID:
+      default:
+         break;
+      }
+   }
+
+   return priority_to_modifier[prio];
+}
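+
+/*
+ * Worked example (illustrative): offered the list { DRM_FORMAT_MOD_LINEAR,
+ * I915_FORMAT_MOD_X_TILED, I915_FORMAT_MOD_Y_TILED_CCS },
+ * modifier_is_supported() currently rejects the CCS modifier, so the
+ * priority reduces to MAX2(LINEAR, X) and I915_FORMAT_MOD_X_TILED wins.
+ */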
+
+static enum isl_surf_dim
+target_to_isl_surf_dim(enum pipe_texture_target target)
+{
+   switch (target) {
+   case PIPE_BUFFER:
+   case PIPE_TEXTURE_1D:
+   case PIPE_TEXTURE_1D_ARRAY:
+      return ISL_SURF_DIM_1D;
+   case PIPE_TEXTURE_2D:
+   case PIPE_TEXTURE_CUBE:
+   case PIPE_TEXTURE_RECT:
+   case PIPE_TEXTURE_2D_ARRAY:
+   case PIPE_TEXTURE_CUBE_ARRAY:
+      return ISL_SURF_DIM_2D;
+   case PIPE_TEXTURE_3D:
+      return ISL_SURF_DIM_3D;
+   case PIPE_MAX_TEXTURE_TYPES:
+      break;
+   }
+   unreachable("invalid texture type");
+}
+
+static isl_surf_usage_flags_t
+pipe_bind_to_isl_usage(unsigned bindings)
+{
+   isl_surf_usage_flags_t usage = 0;
+
+   if (bindings & PIPE_BIND_DEPTH_STENCIL)
+      usage |= ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_STENCIL_BIT;
+
+   if (bindings & PIPE_BIND_RENDER_TARGET)
+      usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;
+
+   if (bindings & PIPE_BIND_SHADER_IMAGE)
+      usage |= ISL_SURF_USAGE_STORAGE_BIT;
+
+   if (bindings & PIPE_BIND_DISPLAY_TARGET)
+      usage |= ISL_SURF_USAGE_DISPLAY_BIT;
+
+   /* XXX: what to do with these? */
+   if (bindings & PIPE_BIND_BLENDABLE)
+      ;
+   if (bindings & PIPE_BIND_SAMPLER_VIEW)
+      ;
+   if (bindings & PIPE_BIND_VERTEX_BUFFER)
+      ;
+   if (bindings & PIPE_BIND_INDEX_BUFFER)
+      ;
+   if (bindings & PIPE_BIND_CONSTANT_BUFFER)
+      ;
+
+   if (bindings & PIPE_BIND_STREAM_OUTPUT)
+      ;
+   if (bindings & PIPE_BIND_CURSOR)
+      ;
+   if (bindings & PIPE_BIND_CUSTOM)
+      ;
+
+   if (bindings & PIPE_BIND_GLOBAL)
+      ;
+   if (bindings & PIPE_BIND_SHADER_BUFFER)
+      ;
+   if (bindings & PIPE_BIND_COMPUTE_RESOURCE)
+      ;
+   if (bindings & PIPE_BIND_COMMAND_ARGS_BUFFER)
+      ;
+   if (bindings & PIPE_BIND_QUERY_BUFFER)
+      ;
+
+   return usage;
+}
+
+static void
+iris_resource_destroy(struct pipe_screen *screen,
+                      struct pipe_resource *resource)
+{
+   struct iris_resource *res = (struct iris_resource *)resource;
+
+   iris_bo_unreference(res->bo);
+   free(res);
+}
+
+static struct iris_resource *
+iris_alloc_resource(struct pipe_screen *pscreen,
+                    const struct pipe_resource *templ)
+{
+   struct iris_resource *res = calloc(1, sizeof(struct iris_resource));
+   if (!res)
+      return NULL;
+
+   res->base = *templ;
+   res->base.screen = pscreen;
+   pipe_reference_init(&res->base.reference, 1);
+
+   return res;
+}
+
+static struct pipe_resource *
+iris_resource_create_with_modifiers(struct pipe_screen *pscreen,
+                                    const struct pipe_resource *templ,
+                                    const uint64_t *modifiers,
+                                    int modifiers_count)
+{
+   struct iris_screen *screen = (struct iris_screen *)pscreen;
+   struct gen_device_info *devinfo = &screen->devinfo;
+   struct iris_resource *res = iris_alloc_resource(pscreen, templ);
+   if (!res)
+      return NULL;
+
+   uint64_t modifier = DRM_FORMAT_MOD_INVALID;
+
+   if (templ->target == PIPE_BUFFER)
+      modifier = DRM_FORMAT_MOD_LINEAR;
+
+   if (templ->bind & (PIPE_BIND_LINEAR | PIPE_BIND_CURSOR))
+      modifier = DRM_FORMAT_MOD_LINEAR;
+
+   if (modifier == DRM_FORMAT_MOD_INVALID && modifiers_count == 0) {
+      /* Display is X-tiled for historical reasons. */
+      modifier = (templ->bind & PIPE_BIND_DISPLAY_TARGET) ?
+                 I915_FORMAT_MOD_X_TILED : I915_FORMAT_MOD_Y_TILED;
+      /* XXX: make sure this doesn't do stupid things for internal textures */
+   }
+
+   if (modifier == DRM_FORMAT_MOD_INVALID) {
+      /* User requested specific modifiers */
+      modifier = select_best_modifier(devinfo, modifiers, modifiers_count);
+      if (modifier == DRM_FORMAT_MOD_INVALID)
+         goto fail;
+   }
+
+   const struct isl_drm_modifier_info *mod_info =
+      isl_drm_modifier_get_info(modifier);
+
+   isl_surf_usage_flags_t usage = pipe_bind_to_isl_usage(templ->bind);
+
+   if (templ->target == PIPE_TEXTURE_CUBE)
+      usage |= ISL_SURF_USAGE_CUBE_BIT;
+
+   isl_surf_init(&screen->isl_dev, &res->surf,
+                 .dim = target_to_isl_surf_dim(templ->target),
+                 .format = iris_isl_format_for_pipe_format(templ->format),
+                 .width = templ->width0,
+                 .height = templ->height0,
+                 .depth = templ->depth0,
+                 .levels = templ->last_level + 1,
+                 .array_len = templ->array_size,
+                 .samples = MAX2(templ->nr_samples, 1),
+                 .min_alignment_B = 0,
+                 .row_pitch_B = 0,
+                 .usage = usage,
+                 .tiling_flags = 1 << mod_info->tiling);
+
+   res->bo = iris_bo_alloc_tiled(screen->bufmgr, "resource", res->surf.size_B,
+                                 isl_tiling_to_i915_tiling(res->surf.tiling),
+                                 res->surf.row_pitch_B, 0);
+   if (!res->bo)
+      goto fail;
+
+   return &res->base;
+
+fail:
+   iris_resource_destroy(pscreen, &res->base);
+   return NULL;
+}
+
+static struct pipe_resource *
+iris_resource_create(struct pipe_screen *pscreen,
+                     const struct pipe_resource *templ)
+{
+   return iris_resource_create_with_modifiers(pscreen, templ, NULL, 0);
+}
+
+static struct pipe_resource *
+iris_resource_from_handle(struct pipe_screen *pscreen,
+                          const struct pipe_resource *templ,
+                          struct winsys_handle *whandle,
+                          unsigned usage)
+{
+   struct iris_screen *screen = (struct iris_screen *)pscreen;
+   struct iris_bufmgr *bufmgr = screen->bufmgr;
+   struct iris_resource *res = iris_alloc_resource(pscreen, templ);
+   if (!res)
+      return NULL;
+
+   if (whandle->offset != 0) {
+      dbg_printf("Attempt to import unsupported winsys offset %u\n",
+                 whandle->offset);
+      goto fail;
+   }
+
+   switch (whandle->type) {
+   case WINSYS_HANDLE_TYPE_SHARED:
+      /* SHARED handles are GEM flink names. */
+      res->bo = iris_bo_gem_create_from_name(bufmgr, "winsys image",
+                                             whandle->handle);
+      break;
+   case WINSYS_HANDLE_TYPE_FD:
+      /* FD handles are dma-buf file descriptors. */
+      res->bo = iris_bo_import_dmabuf(bufmgr, whandle->handle);
+      break;
+   default:
+      unreachable("invalid winsys handle type");
+   }
+
+   if (!res->bo)
+      goto fail;
+
+   const struct isl_drm_modifier_info *mod_info =
+      isl_drm_modifier_get_info(whandle->modifier);
+
+   // XXX: usage...
+   isl_surf_usage_flags_t isl_usage = ISL_SURF_USAGE_DISPLAY_BIT;
+
+   isl_surf_init(&screen->isl_dev, &res->surf,
+                 .dim = target_to_isl_surf_dim(templ->target),
+                 .format = iris_isl_format_for_pipe_format(templ->format),
+                 .width = templ->width0,
+                 .height = templ->height0,
+                 .depth = templ->depth0,
+                 .levels = templ->last_level + 1,
+                 .array_len = templ->array_size,
+                 .samples = MAX2(templ->nr_samples, 1),
+                 .min_alignment_B = 0,
+                 .row_pitch_B = 0,
+                 .usage = isl_usage,
+                 .tiling_flags = 1 << mod_info->tiling);
+
+   assert(res->bo->tiling_mode == isl_tiling_to_i915_tiling(res->surf.tiling));
+
+   return &res->base;
+
+fail:
+   iris_resource_destroy(pscreen, &res->base);
+   return NULL;
+}
+
+static boolean
+iris_resource_get_handle(struct pipe_screen *pscreen,
+                         struct pipe_context *ctx,
+                         struct pipe_resource *resource,
+                         struct winsys_handle *whandle,
+                         unsigned usage)
+{
+   struct iris_resource *res = (struct iris_resource *)resource;
+
+   whandle->stride = res->surf.row_pitch_B;
+
+   switch (whandle->type) {
+   case WINSYS_HANDLE_TYPE_SHARED:
+      /* iris_bo_flink() returns 0 on success. */
+      return iris_bo_flink(res->bo, &whandle->handle) == 0;
+   case WINSYS_HANDLE_TYPE_KMS:
+      whandle->handle = iris_bo_export_gem_handle(res->bo);
+      return true;
+   case WINSYS_HANDLE_TYPE_FD:
+      return iris_bo_export_dmabuf(res->bo, (int *) &whandle->handle) == 0;
+   }
+
+   return false;
+}
+
+void
+iris_init_screen_resource_functions(struct pipe_screen *pscreen)
+{
+   pscreen->resource_create_with_modifiers =
+      iris_resource_create_with_modifiers;
+   pscreen->resource_create = iris_resource_create;
+   pscreen->resource_from_handle = iris_resource_from_handle;
+   pscreen->resource_get_handle = iris_resource_get_handle;
+   pscreen->resource_destroy = iris_resource_destroy;
+}
diff --git a/src/gallium/drivers/iris/iris_resource.h b/src/gallium/drivers/iris/iris_resource.h
new file mode 100644 (file)
index 0000000..8e4d8d4
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef IRIS_RESOURCE_H
+#define IRIS_RESOURCE_H
+
+#include "pipe/p_state.h"
+#include "intel/isl/isl.h"
+
+struct iris_resource {
+   struct pipe_resource        base;
+   struct isl_surf surf;
+   struct iris_bo *bo;
+};
+
+enum isl_format iris_isl_format_for_pipe_format(enum pipe_format pf);
+
+void iris_init_screen_resource_functions(struct pipe_screen *pscreen);
+
+#endif
diff --git a/src/gallium/drivers/iris/iris_screen.c b/src/gallium/drivers/iris/iris_screen.c
new file mode 100644 (file)
index 0000000..9d67984
--- /dev/null
@@ -0,0 +1,524 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include <limits.h>
+#include <sys/ioctl.h>
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "util/u_inlines.h"
+#include "util/u_format.h"
+#include "util/u_upload_mgr.h"
+#include "util/ralloc.h"
+#include "drm-uapi/i915_drm.h"
+#include "iris_context.h"
+#include "iris_resource.h"
+#include "iris_screen.h"
+#include "intel/compiler/brw_compiler.h"
+
+static void
+iris_flush_frontbuffer(struct pipe_screen *_screen,
+                       struct pipe_resource *resource,
+                       unsigned level, unsigned layer,
+                       void *context_private, struct pipe_box *box)
+{
+}
+
+static const char *
+iris_get_vendor(struct pipe_screen *pscreen)
+{
+   return "Mesa Project";
+}
+
+static const char *
+iris_get_device_vendor(struct pipe_screen *pscreen)
+{
+   return "Intel";
+}
+
+static const char *
+iris_get_name(struct pipe_screen *pscreen)
+{
+   struct iris_screen *screen = (struct iris_screen *)pscreen;
+   const char *chipset;
+
+   switch (screen->pci_id) {
+#undef CHIPSET
+#define CHIPSET(id, symbol, str) case id: chipset = str; break;
+#include "pci_ids/i965_pci_ids.h"
+   default:
+      chipset = "Unknown Intel Chipset";
+      break;
+   }
+
+   /* Skip the "Intel(R) " vendor prefix (9 characters). */
+   return &chipset[9];
+}
+
+static int
+iris_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
+{
+   struct iris_screen *screen = (struct iris_screen *)pscreen;
+
+   switch (param) {
+   case PIPE_CAP_NPOT_TEXTURES:
+   case PIPE_CAP_ANISOTROPIC_FILTER:
+   case PIPE_CAP_POINT_SPRITE:
+   case PIPE_CAP_OCCLUSION_QUERY:
+   case PIPE_CAP_QUERY_TIME_ELAPSED:
+   case PIPE_CAP_TEXTURE_SWIZZLE:
+   case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
+   case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+   case PIPE_CAP_SM3:
+   case PIPE_CAP_PRIMITIVE_RESTART:
+   case PIPE_CAP_INDEP_BLEND_ENABLE:
+   case PIPE_CAP_INDEP_BLEND_FUNC:
+   case PIPE_CAP_RGB_OVERRIDE_DST_ALPHA_BLEND:
+   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
+   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
+   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
+   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
+   case PIPE_CAP_DEPTH_CLIP_DISABLE:
+   case PIPE_CAP_SHADER_STENCIL_EXPORT:
+   case PIPE_CAP_TGSI_INSTANCEID:
+   case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
+   case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
+   case PIPE_CAP_SEAMLESS_CUBE_MAP:
+   case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
+   case PIPE_CAP_CONDITIONAL_RENDER:
+   case PIPE_CAP_TEXTURE_BARRIER:
+   case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
+   case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
+   case PIPE_CAP_COMPUTE:
+   case PIPE_CAP_START_INSTANCE:
+   case PIPE_CAP_QUERY_TIMESTAMP:
+   case PIPE_CAP_TEXTURE_MULTISAMPLE:
+   case PIPE_CAP_CUBE_MAP_ARRAY:
+   case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
+   case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
+   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
+   case PIPE_CAP_TEXTURE_QUERY_LOD:
+   case PIPE_CAP_SAMPLE_SHADING:
+   case PIPE_CAP_TEXTURE_GATHER_OFFSETS:
+   case PIPE_CAP_DRAW_INDIRECT:
+   case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
+   case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT:
+   case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE:
+   case PIPE_CAP_ACCELERATED:
+   case PIPE_CAP_UMA:
+   case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
+   case PIPE_CAP_CLIP_HALFZ:
+      return true;
+
+   case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
+   case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
+   case PIPE_CAP_VERTEX_COLOR_CLAMPED:
+   case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
+   case PIPE_CAP_USER_VERTEX_BUFFERS:
+   case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
+   case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
+   case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
+   case PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY:
+   case PIPE_CAP_TGSI_TEXCOORD:
+   case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
+   case PIPE_CAP_FAKE_SW_MSAA:
+   case PIPE_CAP_VERTEXID_NOBASE:
+      return false;
+
+   case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
+      return 1;
+   case PIPE_CAP_MAX_RENDER_TARGETS:
+      return BRW_MAX_DRAW_BUFFERS;
+   case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
+   case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+      return 15; /* 16384x16384 */
+   case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+      return 12; /* 2048x2048 */
+   case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
+      return 4;
+   case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
+      return 2048;
+   case PIPE_CAP_MIN_TEXEL_OFFSET:
+      return -8;
+   case PIPE_CAP_MAX_TEXEL_OFFSET:
+      return 7;
+   case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
+      return BRW_MAX_SOL_BINDINGS / IRIS_MAX_SOL_BUFFERS;
+   case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
+      return BRW_MAX_SOL_BINDINGS;
+   case PIPE_CAP_GLSL_FEATURE_LEVEL:
+      return 460;
+   case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
+      /* 3DSTATE_CONSTANT_XS requires the start of UBOs to be 32B aligned */
+      return 32;
+   case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
+      return 64; // XXX: ?
+   case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
+      return 1;
+   case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
+      return true; // XXX: ?????
+   case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
+      return 1 << 27; /* 128MB */
+   case PIPE_CAP_MAX_VIEWPORTS:
+      return 16;
+   case PIPE_CAP_ENDIANNESS:
+      return PIPE_ENDIAN_LITTLE;
+   case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
+      return 256;
+   case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
+      return 128;
+   case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
+   case PIPE_CAP_TEXTURE_GATHER_SM5:
+      return 0; // XXX:
+   case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
+      return -32;
+   case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
+      return 31;
+   case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
+      return true;
+   case PIPE_CAP_MAX_VERTEX_STREAMS:
+      return 4;
+   case PIPE_CAP_VENDOR_ID:
+      return 0x8086;
+   case PIPE_CAP_DEVICE_ID:
+      return screen->pci_id;
+   case PIPE_CAP_VIDEO_MEMORY:
+      return 0xffffffff; // XXX: bogus
+   case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
+      return 2048;
+   case PIPE_CAP_SAMPLER_VIEW_TARGET:
+      return false; // XXX: what is this?
+   case PIPE_CAP_POLYGON_OFFSET_CLAMP:
+   case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
+   case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
+   case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
+   case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
+   case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
+   case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
+   case PIPE_CAP_DEPTH_BOUNDS_TEST:
+   case PIPE_CAP_TGSI_TXQS:
+   case PIPE_CAP_FORCE_PERSAMPLE_INTERP:
+   case PIPE_CAP_SHAREABLE_SHADERS:
+   case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
+   case PIPE_CAP_CLEAR_TEXTURE:
+   case PIPE_CAP_DRAW_PARAMETERS:
+   case PIPE_CAP_TGSI_PACK_HALF_FLOAT:
+   case PIPE_CAP_MULTI_DRAW_INDIRECT:
+   case PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS:
+   case PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL:
+   case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL:
+   case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
+   case PIPE_CAP_INVALIDATE_BUFFER:
+   case PIPE_CAP_GENERATE_MIPMAP:
+   case PIPE_CAP_STRING_MARKER:
+   case PIPE_CAP_SURFACE_REINTERPRET_BLOCKS:
+   case PIPE_CAP_QUERY_BUFFER_OBJECT:
+   case PIPE_CAP_QUERY_MEMORY_INFO:
+   case PIPE_CAP_PCI_GROUP:
+   case PIPE_CAP_PCI_BUS:
+   case PIPE_CAP_PCI_DEVICE:
+   case PIPE_CAP_PCI_FUNCTION:
+   case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
+   case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
+   case PIPE_CAP_CULL_DISTANCE:
+   case PIPE_CAP_PRIMITIVE_RESTART_FOR_PATCHES:
+   case PIPE_CAP_TGSI_VOTE:
+   case PIPE_CAP_MAX_WINDOW_RECTANGLES:
+   case PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED:
+   case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
+   case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
+   case PIPE_CAP_TGSI_ARRAY_COMPONENTS:
+   case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
+   case PIPE_CAP_TGSI_CAN_READ_OUTPUTS:
+   case PIPE_CAP_NATIVE_FENCE_FD:
+   case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
+   case PIPE_CAP_TGSI_FS_FBFETCH:
+   case PIPE_CAP_TGSI_MUL_ZERO_WINS:
+   case PIPE_CAP_DOUBLES:
+   case PIPE_CAP_INT64:
+   case PIPE_CAP_INT64_DIVMOD:
+   case PIPE_CAP_TGSI_TEX_TXF_LZ:
+   case PIPE_CAP_TGSI_CLOCK:
+   case PIPE_CAP_POLYGON_MODE_FILL_RECTANGLE:
+   case PIPE_CAP_SPARSE_BUFFER_PAGE_SIZE:
+   case PIPE_CAP_TGSI_BALLOT:
+   case PIPE_CAP_TGSI_TES_LAYER_VIEWPORT:
+   case PIPE_CAP_CAN_BIND_CONST_BUFFER_AS_VERTEX:
+   case PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION:
+   case PIPE_CAP_POST_DEPTH_COVERAGE:
+   case PIPE_CAP_BINDLESS_TEXTURE:
+   case PIPE_CAP_NIR_SAMPLERS_AS_DEREF:
+   case PIPE_CAP_QUERY_SO_OVERFLOW:
+   case PIPE_CAP_MEMOBJ:
+   case PIPE_CAP_LOAD_CONSTBUF:
+   case PIPE_CAP_TGSI_ANY_REG_AS_ADDRESS:
+   case PIPE_CAP_TILE_RASTER_ORDER:
+   case PIPE_CAP_MAX_COMBINED_SHADER_OUTPUT_RESOURCES:
+   case PIPE_CAP_SIGNED_VERTEX_BUFFER_OFFSET:
+   case PIPE_CAP_CONTEXT_PRIORITY_MASK:
+      // XXX: TODO: fill these out
+      break;
+   }
+   return 0;
+}
+
+static float
+iris_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
+{
+   switch (param) {
+   case PIPE_CAPF_MAX_LINE_WIDTH:
+   case PIPE_CAPF_MAX_LINE_WIDTH_AA:
+      return 7.375f;
+
+   case PIPE_CAPF_MAX_POINT_WIDTH:
+   case PIPE_CAPF_MAX_POINT_WIDTH_AA:
+      return 255.0f;
+
+   case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
+      return 16.0f;
+   case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
+      return 15.0f;
+   default:
+      unreachable("unknown param");
+   }
+}
+
+static int
+iris_get_shader_param(struct pipe_screen *pscreen,
+                      enum pipe_shader_type shader,
+                      enum pipe_shader_cap param)
+{
+   struct iris_screen *screen = (struct iris_screen *)pscreen;
+   struct brw_compiler *compiler = screen->compiler;
+
+   /* this is probably not totally correct.. but it's a start: */
+   switch (param) {
+   case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
+      return shader == PIPE_SHADER_FRAGMENT ? 1024 : 16384;
+   case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
+   case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
+   case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
+      return shader == PIPE_SHADER_FRAGMENT ? 1024 : 0;
+
+   case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
+      return UINT_MAX;
+
+   case PIPE_SHADER_CAP_MAX_INPUTS:
+      return shader == PIPE_SHADER_VERTEX ? 16 : 32;
+   case PIPE_SHADER_CAP_MAX_OUTPUTS:
+      return 32;
+   case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
+      return 16 * 1024 * sizeof(float);
+   case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
+      return 16;
+   case PIPE_SHADER_CAP_MAX_TEMPS:
+      return 256; /* GL_MAX_PROGRAM_TEMPORARIES_ARB */
+   case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
+      return 0;
+   case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
+      return !compiler->glsl_compiler_options[shader].EmitNoIndirectInput;
+   case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
+      return !compiler->glsl_compiler_options[shader].EmitNoIndirectOutput;
+   case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
+      return !compiler->glsl_compiler_options[shader].EmitNoIndirectTemp;
+   case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
+      return 1;
+   case PIPE_SHADER_CAP_SUBROUTINES:
+      return 0;
+   case PIPE_SHADER_CAP_INTEGERS:
+   case PIPE_SHADER_CAP_SCALAR_ISA:
+      return 1;
+   case PIPE_SHADER_CAP_INT64_ATOMICS:
+   case PIPE_SHADER_CAP_FP16:
+      return 0;
+   case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
+   case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
+   case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
+   case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
+      return IRIS_MAX_TEXTURE_SAMPLERS;
+   case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
+   case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
+      return 0;
+   case PIPE_SHADER_CAP_PREFERRED_IR:
+      return PIPE_SHADER_IR_NIR;
+   case PIPE_SHADER_CAP_SUPPORTED_IRS:
+      return 0;
+   case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
+      return 32;
+   case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
+   case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
+   case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED:
+   case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED:
+   case PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED:
+   case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED:
+   case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
+   case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
+      return 0;
+   default:
+      unreachable("unknown shader param");
+   }
+}
+
+static int
+iris_get_compute_param(struct pipe_screen *pscreen,
+                       enum pipe_shader_ir ir_type,
+                       enum pipe_compute_cap param,
+                       void *ret)
+{
+   /* TODO: compute shaders */
+   return 0;
+}
+
+static uint64_t
+iris_get_timestamp(struct pipe_screen *pscreen)
+{
+   return 0;
+}
+
+static void
+iris_destroy_screen(struct pipe_screen *pscreen)
+{
+   struct iris_screen *screen = (struct iris_screen *) pscreen;
+   ralloc_free(screen);
+}
+
+static void
+iris_fence_reference(struct pipe_screen *screen,
+                     struct pipe_fence_handle **ptr,
+                     struct pipe_fence_handle *fence)
+{
+}
+
+static boolean
+iris_fence_finish(struct pipe_screen *screen,
+                  struct pipe_context *ctx,
+                  struct pipe_fence_handle *fence,
+                  uint64_t timeout)
+{
+   return true;
+}
+
+static void
+iris_query_memory_info(struct pipe_screen *pscreen,
+                       struct pipe_memory_info *info)
+{
+}
+
+static gl_shader_stage
+stage_from_pipe(enum pipe_shader_type pstage)
+{
+   static const gl_shader_stage stages[PIPE_SHADER_TYPES] = {
+      [PIPE_SHADER_VERTEX] = MESA_SHADER_VERTEX,
+      [PIPE_SHADER_TESS_CTRL] = MESA_SHADER_TESS_CTRL,
+      [PIPE_SHADER_TESS_EVAL] = MESA_SHADER_TESS_EVAL,
+      [PIPE_SHADER_GEOMETRY] = MESA_SHADER_GEOMETRY,
+      [PIPE_SHADER_FRAGMENT] = MESA_SHADER_FRAGMENT,
+      [PIPE_SHADER_COMPUTE] = MESA_SHADER_COMPUTE,
+   };
+   return stages[pstage];
+}
+
+static const void *
+iris_get_compiler_options(struct pipe_screen *pscreen,
+                          enum pipe_shader_ir ir,
+                          enum pipe_shader_type pstage)
+{
+   struct iris_screen *screen = (struct iris_screen *) pscreen;
+   gl_shader_stage stage = stage_from_pipe(pstage);
+   assert(ir == PIPE_SHADER_IR_NIR);
+
+   return screen->compiler->glsl_compiler_options[stage].NirOptions;
+}
+
+static int
+iris_getparam(struct iris_screen *screen, int param, int *value)
+{
+   struct drm_i915_getparam gp = { .param = param, .value = value };
+
+   if (ioctl(screen->fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1)
+      return -errno;
+
+   return 0;
+}
+
+static bool
+iris_getparam_boolean(struct iris_screen *screen, int param)
+{
+   int value = 0;
+   return (iris_getparam(screen, param, &value) == 0) && value;
+}
+
+static int
+iris_getparam_integer(struct iris_screen *screen, int param)
+{
+   int value = -1;
+
+   if (iris_getparam(screen, param, &value) == 0)
+      return value;
+
+   return -1;
+}
+
+struct pipe_screen *
+iris_screen_create(int fd)
+{
+   struct iris_screen *screen = rzalloc(NULL, struct iris_screen);
+   if (!screen)
+      return NULL;
+
+   screen->fd = fd;
+   screen->pci_id = iris_getparam_integer(screen, I915_PARAM_CHIPSET_ID);
+
+   if (!gen_get_device_info(screen->pci_id, &screen->devinfo)) {
+      ralloc_free(screen);
+      return NULL;
+   }
+
+   screen->bufmgr = iris_bufmgr_init(&screen->devinfo, fd);
+   if (!screen->bufmgr) {
+      ralloc_free(screen);
+      return NULL;
+   }
+
+   bool hw_has_swizzling = false; // XXX: detect?
+   isl_device_init(&screen->isl_dev, &screen->devinfo, hw_has_swizzling);
+
+   screen->compiler = brw_compiler_create(screen, &screen->devinfo);
+
+   struct pipe_screen *pscreen = &screen->base;
+
+   iris_init_screen_resource_functions(pscreen);
+
+   pscreen->destroy = iris_destroy_screen;
+   pscreen->get_name = iris_get_name;
+   pscreen->get_vendor = iris_get_vendor;
+   pscreen->get_device_vendor = iris_get_device_vendor;
+   pscreen->get_param = iris_get_param;
+   pscreen->get_shader_param = iris_get_shader_param;
+   pscreen->get_compute_param = iris_get_compute_param;
+   pscreen->get_paramf = iris_get_paramf;
+   pscreen->get_compiler_options = iris_get_compiler_options;
+   pscreen->is_format_supported = iris_is_format_supported;
+   pscreen->context_create = iris_create_context;
+   pscreen->flush_frontbuffer = iris_flush_frontbuffer;
+   pscreen->get_timestamp = iris_get_timestamp;
+   pscreen->fence_reference = iris_fence_reference;
+   pscreen->fence_finish = iris_fence_finish;
+   pscreen->query_memory_info = iris_query_memory_info;
+
+   return pscreen;
+}
diff --git a/src/gallium/drivers/iris/iris_screen.h b/src/gallium/drivers/iris/iris_screen.h
new file mode 100644 (file)
index 0000000..5484e53
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef IRIS_SCREEN_H
+#define IRIS_SCREEN_H
+
+#include "pipe/p_screen.h"
+#include "state_tracker/drm_driver.h"
+#include "util/slab.h"
+#include "util/u_screen.h"
+#include "intel/dev/gen_device_info.h"
+#include "intel/isl/isl.h"
+#include "iris_bufmgr.h"
+
+struct iris_bo;
+
+#define IRIS_MAX_TEXTURE_SAMPLERS 32
+#define IRIS_MAX_SOL_BUFFERS 4
+
+struct iris_screen {
+   struct pipe_screen base;
+   int fd;
+   int pci_id;
+
+   unsigned program_id;
+
+   struct gen_device_info devinfo;
+   struct isl_device isl_dev;
+   struct iris_bufmgr *bufmgr;
+   struct brw_compiler *compiler;
+};
+
+struct pipe_screen *iris_screen_create(int fd);
+
+boolean
+iris_is_format_supported(struct pipe_screen *pscreen,
+                         enum pipe_format format,
+                         enum pipe_texture_target target,
+                         unsigned sample_count,
+                         unsigned storage_sample_count,
+                         unsigned usage);
+
+#endif
diff --git a/src/gallium/drivers/iris/iris_state.c b/src/gallium/drivers/iris/iris_state.c
new file mode 100644 (file)
index 0000000..cfd0826
--- /dev/null
@@ -0,0 +1,920 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+#include "pipe/p_context.h"
+#include "pipe/p_screen.h"
+#include "util/u_inlines.h"
+#include "util/u_transfer.h"
+#include "intel/compiler/brw_compiler.h"
+#include "iris_context.h"
+#include "iris_resource.h"
+
+#define __gen_address_type unsigned
+#define __gen_user_data void
+
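+/* This skeleton emits no relocations yet: combining an address simply
+ * returns the constant delta and drops the batch address on the floor.
+ */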
+static uint64_t
+__gen_combine_address(void *user_data, void *location,
+                      unsigned address, uint32_t delta)
+{
+   return delta;
+}
+
+#define __genxml_cmd_length(cmd) cmd ## _length
+#define __genxml_cmd_length_bias(cmd) cmd ## _length_bias
+#define __genxml_cmd_header(cmd) cmd ## _header
+#define __genxml_cmd_pack(cmd) cmd ## _pack
+
+#define iris_pack_command(cmd, dst, name)                         \
+   for (struct cmd name = { __genxml_cmd_header(cmd) },           \
+        *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
+        __genxml_cmd_pack(cmd)(NULL, (void *)_dst, &name),        \
+        _dst = NULL)
+
+#define iris_pack_state(cmd, dst, name)                           \
+   for (struct cmd name = {},                                     \
+        *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
+        __genxml_cmd_pack(cmd)(NULL, (void *)_dst, &name),        \
+        _dst = NULL)
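+
+/*
+ * Illustrative usage (assuming the CC_VIEWPORT struct from genxml): both
+ * macros expand to a single-iteration for loop, so fields are assigned in
+ * the loop body and the template is serialized into dwords by the pack
+ * call in the loop's increment expression:
+ *
+ *    uint32_t vp[GENX(CC_VIEWPORT_length)];
+ *    iris_pack_state(GENX(CC_VIEWPORT), vp, cc) {
+ *       cc.MinimumDepth = 0.0f;
+ *       cc.MaximumDepth = 1.0f;
+ *    }
+ */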
+
+#include "genxml/genX_pack.h"
+#include "genxml/gen_macros.h"
+
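+/* A MOCS table index selecting write-back caching, pre-shifted into the
+ * MOCS bitfield (the same value i965 uses as SKL_MOCS_WB).
+ */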
+#define MOCS_WB (2 << 1)
+
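+/* pipe_asserts() is never called; it only provides a scope so that the
+ * STATIC_ASSERTs below are checked at compile time.
+ */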
+UNUSED static void pipe_asserts(void)
+{
+#define PIPE_ASSERT(x) STATIC_ASSERT((int)x)
+
+   /* pipe_logicop happens to match the hardware. */
+   PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
+   PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
+   PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
+   PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
+   PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
+   PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
+   PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
+   PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
+   PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
+   PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
+   PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
+   PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
+   PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
+   PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
+   PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
+   PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);
+
+   /* pipe_blendfactor happens to match the hardware. */
+   PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
+   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);
+
+   /* pipe_blend_func happens to match the hardware. */
+   PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
+   PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
+   PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
+   PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
+   PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);
+
+   /* pipe_stencil_op happens to match the hardware. */
+   PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
+   PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
+   PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
+   PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
+   PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
+   PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
+   PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
+   PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);
+#undef PIPE_ASSERT
+}
+
+static unsigned
+translate_compare_func(enum pipe_compare_func pipe_func)
+{
+   static const unsigned map[] = {
+      [PIPE_FUNC_NEVER]    = COMPAREFUNCTION_NEVER,
+      [PIPE_FUNC_LESS]     = COMPAREFUNCTION_LESS,
+      [PIPE_FUNC_EQUAL]    = COMPAREFUNCTION_EQUAL,
+      [PIPE_FUNC_LEQUAL]   = COMPAREFUNCTION_LEQUAL,
+      [PIPE_FUNC_GREATER]  = COMPAREFUNCTION_GREATER,
+      [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
+      [PIPE_FUNC_GEQUAL]   = COMPAREFUNCTION_GEQUAL,
+      [PIPE_FUNC_ALWAYS]   = COMPAREFUNCTION_ALWAYS,
+   };
+   return map[pipe_func];
+}
+
+static unsigned
+translate_shadow_func(enum pipe_compare_func pipe_func)
+{
+   /* Gallium specifies the result of shadow comparisons as:
+    *
+    *    1 if ref <op> texel,
+    *    0 otherwise.
+    *
+    * The hardware does:
+    *
+    *    0 if texel <op> ref,
+    *    1 otherwise.
+    *
+    * So we need to flip the operator and also negate. For example,
+    * PIPE_FUNC_LESS ("1 if ref < texel") maps to PREFILTEROPLEQUAL
+    * ("0 if texel <= ref"), which is the same predicate.
+    */
+   static const unsigned map[] = {
+      [PIPE_FUNC_NEVER]    = PREFILTEROPALWAYS,
+      [PIPE_FUNC_LESS]     = PREFILTEROPLEQUAL,
+      [PIPE_FUNC_EQUAL]    = PREFILTEROPNOTEQUAL,
+      [PIPE_FUNC_LEQUAL]   = PREFILTEROPLESS,
+      [PIPE_FUNC_GREATER]  = PREFILTEROPGEQUAL,
+      [PIPE_FUNC_NOTEQUAL] = PREFILTEROPEQUAL,
+      [PIPE_FUNC_GEQUAL]   = PREFILTEROPGREATER,
+      [PIPE_FUNC_ALWAYS]   = PREFILTEROPNEVER,
+   };
+   return map[pipe_func];
+}
+
+static unsigned
+translate_cull_mode(unsigned pipe_face)
+{
+   static const unsigned map[4] = {
+      [PIPE_FACE_NONE]           = CULLMODE_NONE,
+      [PIPE_FACE_FRONT]          = CULLMODE_FRONT,
+      [PIPE_FACE_BACK]           = CULLMODE_BACK,
+      [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
+   };
+   return map[pipe_face];
+}
+
+static unsigned
+translate_fill_mode(unsigned pipe_polymode)
+{
+   static const unsigned map[4] = {
+      [PIPE_POLYGON_MODE_FILL]           = FILL_MODE_SOLID,
+      [PIPE_POLYGON_MODE_LINE]           = FILL_MODE_WIREFRAME,
+      [PIPE_POLYGON_MODE_POINT]          = FILL_MODE_POINT,
+      [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
+   };
+   return map[pipe_polymode];
+}
+
+static void
+iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
+{
+}
+
+static void
+iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *info)
+{
+}
+
+static void
+iris_set_blend_color(struct pipe_context *ctx,
+                     const struct pipe_blend_color *state)
+{
+   struct iris_context *ice = (struct iris_context *) ctx;
+
+   memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
+   ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
+}
+
+struct iris_blend_state {
+   uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];
+   uint32_t blend_state[GENX(BLEND_STATE_length)];
+   uint32_t blend_entries[BRW_MAX_DRAW_BUFFERS *
+                          GENX(BLEND_STATE_ENTRY_length)];
+};
+
+static void *
+iris_create_blend_state(struct pipe_context *ctx,
+                        const struct pipe_blend_state *state)
+{
+   struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
+
+   iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
+      bs.AlphaToCoverageEnable = state->alpha_to_coverage;
+      bs.IndependentAlphaBlendEnable = state->independent_blend_enable;
+      bs.AlphaToOneEnable = state->alpha_to_one;
+      bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
+      bs.ColorDitherEnable = state->dither;
+      //bs.AlphaTestEnable = <comes from alpha state> :(
+      //bs.AlphaTestFunction = <comes from alpha state> :(
+   }
+
+   iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
+      //pb.HasWriteableRT = <comes from somewhere> :(
+      //pb.AlphaTestEnable = <comes from alpha state> :(
+      pb.AlphaToCoverageEnable = state->alpha_to_coverage;
+      pb.IndependentAlphaBlendEnable = state->independent_blend_enable;
+
+      pb.ColorBufferBlendEnable = state->rt[0].blend_enable;
+
+      pb.SourceBlendFactor           = state->rt[0].rgb_src_factor;
+      pb.SourceAlphaBlendFactor      = state->rt[0].alpha_src_factor;
+      pb.DestinationBlendFactor      = state->rt[0].rgb_dst_factor;
+      pb.DestinationAlphaBlendFactor = state->rt[0].alpha_dst_factor;
+   }
+
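+   /* Each render target gets its own BLEND_STATE_ENTRY; the logic op and
+    * clamp settings are simply replicated across all of them.
+    */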
+   for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
+      /* When independent_blend_enable is unset, only rt[0] is valid and
+       * applies to every buffer.
+       */
+      const struct pipe_rt_blend_state *rt =
+         &state->rt[state->independent_blend_enable ? i : 0];
+
+      iris_pack_state(GENX(BLEND_STATE_ENTRY),
+                      &cso->blend_entries[i * GENX(BLEND_STATE_ENTRY_length)],
+                      be) {
+         be.LogicOpEnable = state->logicop_enable;
+         be.LogicOpFunction = state->logicop_func;
+
+         be.PreBlendSourceOnlyClampEnable = false;
+         be.ColorClampRange = COLORCLAMP_RTFORMAT;
+         be.PreBlendColorClampEnable = true;
+         be.PostBlendColorClampEnable = true;
+
+         be.ColorBufferBlendEnable = rt->blend_enable;
+
+         be.ColorBlendFunction          = rt->rgb_func;
+         be.AlphaBlendFunction          = rt->alpha_func;
+         be.SourceBlendFactor           = rt->rgb_src_factor;
+         be.SourceAlphaBlendFactor      = rt->alpha_src_factor;
+         be.DestinationBlendFactor      = rt->rgb_dst_factor;
+         be.DestinationAlphaBlendFactor = rt->alpha_dst_factor;
+
+         /* The hardware bits are write *disables*, the inverse of the
+          * gallium colormask's enables.
+          */
+         be.WriteDisableRed   = !(rt->colormask & PIPE_MASK_R);
+         be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
+         be.WriteDisableBlue  = !(rt->colormask & PIPE_MASK_B);
+         be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
+      }
+   }
+
+   return cso;
+}
+
+struct iris_depth_stencil_alpha_state {
+   uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
+   uint32_t cc_vp[GENX(CC_VIEWPORT_length)];
+
+   struct pipe_alpha_state alpha; /* to BLEND_STATE, 3DSTATE_PS_BLEND */
+};
+
+static void *
+iris_create_dsa_state(struct pipe_context *ctx,
+                      const struct pipe_depth_stencil_alpha_state *state)
+{
+   struct iris_depth_stencil_alpha_state *cso =
+      malloc(sizeof(struct iris_depth_stencil_alpha_state));
+
+   cso->alpha = state->alpha;
+
+   bool two_sided_stencil = state->stencil[1].enabled;
+
+   /* The state tracker needs to optimize away EQUAL writes for us. */
+   assert(!(state->depth.func == PIPE_FUNC_EQUAL && state->depth.writemask));
+
+   iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
+      wmds.StencilFailOp = state->stencil[0].fail_op;
+      wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
+      wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
+      wmds.StencilTestFunction =
+         translate_compare_func(state->stencil[0].func);
+      wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
+      wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
+      wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
+      wmds.BackfaceStencilTestFunction =
+         translate_compare_func(state->stencil[1].func);
+      wmds.DepthTestFunction = translate_compare_func(state->depth.func);
+      wmds.DoubleSidedStencilEnable = two_sided_stencil;
+      wmds.StencilTestEnable = state->stencil[0].enabled;
+      wmds.StencilBufferWriteEnable =
+         state->stencil[0].writemask != 0 ||
+         (two_sided_stencil && state->stencil[1].writemask != 0);
+      wmds.DepthTestEnable = state->depth.enabled;
+      wmds.DepthBufferWriteEnable = state->depth.writemask;
+      wmds.StencilTestMask = state->stencil[0].valuemask;
+      wmds.StencilWriteMask = state->stencil[0].writemask;
+      wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
+      wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
+      //wmds.StencilReferenceValue = <comes from elsewhere>
+      //wmds.BackfaceStencilReferenceValue = <comes from elsewhere>
+   }
+
+   iris_pack_state(GENX(CC_VIEWPORT), cso->cc_vp, ccvp) {
+      ccvp.MinimumDepth = state->depth.bounds_min;
+      ccvp.MaximumDepth = state->depth.bounds_max;
+   }
+
+   return cso;
+}
+
+struct iris_rasterizer_state {
+   uint32_t sf[GENX(3DSTATE_SF_length)];
+   uint32_t clip[GENX(3DSTATE_CLIP_length)];
+   uint32_t raster[GENX(3DSTATE_RASTER_length)];
+   uint32_t wm[GENX(3DSTATE_WM_length)];
+
+   bool flatshade; /* for shader state */
+   bool light_twoside; /* for shader state */
+   bool rasterizer_discard; /* for 3DSTATE_STREAMOUT */
+   enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
+
+   uint8_t line_stipple_factor;
+   uint16_t line_stipple_pattern;
+};
+
+static void *
+iris_create_rasterizer_state(struct pipe_context *ctx,
+                             const struct pipe_rasterizer_state *state)
+{
+   struct iris_rasterizer_state *cso =
+      malloc(sizeof(struct iris_rasterizer_state));
+
+#if 0
+   sprite_coord_mode -> SBE PointSpriteTextureCoordinateOrigin
+   sprite_coord_enable -> SBE PointSpriteTextureCoordinateEnable
+   point_quad_rasterization -> SBE?
+
+   not necessary?
+   {
+      poly_smooth
+      force_persample_interp - ?
+      bottom_edge_rule
+
+      offset_units_unscaled - cap not exposed
+   }
+
+   unsigned line_stipple_factor:8;  /**< [1..256] actually */
+   unsigned line_stipple_pattern:16;
+#endif
+
+   cso->flatshade = state->flatshade;
+   cso->light_twoside = state->light_twoside;
+   cso->rasterizer_discard = state->rasterizer_discard;
+   cso->line_stipple_factor = state->line_stipple_factor;
+   cso->line_stipple_pattern = state->line_stipple_pattern;
+   // for 3DSTATE_MULTISAMPLE, if we want it.
+   //cso->half_pixel_center = state->half_pixel_center;
+
+   iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
+      sf.StatisticsEnable = true;
+      sf.ViewportTransformEnable = true;
+      sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
+      sf.LineEndCapAntialiasingRegionWidth =
+         state->line_smooth ? _10pixels : _05pixels;
+      sf.LastPixelEnable = state->line_last_pixel;
+      sf.LineWidth = state->line_width;
+      sf.SmoothPointEnable = state->point_smooth;
+      sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
+      sf.PointWidth = state->point_size;
+
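+      /* The *ProvokingVertexSelect fields are vertex indices within the
+       * primitive.  flatshade_first wants index 0 (1 for fans, whose
+       * vertex 0 is the shared center); the GL default takes the last
+       * vertex (2 for triangles, 1 for lines).
+       */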
+      if (state->flatshade_first) {
+         sf.TriangleFanProvokingVertexSelect = 1;
+      } else {
+         sf.TriangleStripListProvokingVertexSelect = 2;
+         sf.TriangleFanProvokingVertexSelect = 2;
+         sf.LineStripListProvokingVertexSelect = 1;
+      }
+   }
+
+   /* COMPLETE! */
+   iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
+      rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
+      rr.CullMode = translate_cull_mode(state->cull_face);
+      rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
+      rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
+      rr.DXMultisampleRasterizationEnable = state->multisample;
+      rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
+      rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
+      rr.GlobalDepthOffsetEnablePoint = state->offset_point;
+      rr.GlobalDepthOffsetConstant = state->offset_units;
+      rr.GlobalDepthOffsetScale = state->offset_scale;
+      rr.GlobalDepthOffsetClamp = state->offset_clamp;
+      rr.SmoothPointEnable = state->point_smooth;
+      rr.AntialiasingEnable = state->line_smooth;
+      rr.ScissorRectangleEnable = state->scissor;
+      rr.ViewportZNearClipTestEnable = state->depth_clip_near;
+      rr.ViewportZFarClipTestEnable = state->depth_clip_far;
+      //rr.ConservativeRasterizationEnable = not yet supported by Gallium...
+   }
+
+   iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
+      cl.StatisticsEnable = true;
+      cl.EarlyCullEnable = true;
+      cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
+      cl.ForceUserClipDistanceClipTestEnableBitmask = true;
+      cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
+      cl.GuardbandClipTestEnable = true;
+      cl.ClipMode = CLIPMODE_NORMAL;
+      cl.ClipEnable = true;
+      cl.ViewportXYClipTestEnable = state->point_tri_clip;
+      cl.MinimumPointWidth = 0.125;
+      cl.MaximumPointWidth = 255.875;
+      //.NonPerspectiveBarycentricEnable = <comes from FS prog> :(
+      //.ForceZeroRTAIndexEnable = <comes from FB layers being 0>
+
+      if (state->flatshade_first) {
+         cl.TriangleFanProvokingVertexSelect = 1;
+      } else {
+         cl.TriangleStripListProvokingVertexSelect = 2;
+         cl.TriangleFanProvokingVertexSelect = 2;
+         cl.LineStripListProvokingVertexSelect = 1;
+      }
+   }
+
+   iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
+      wm.LineAntialiasingRegionWidth = _10pixels;
+      wm.LineEndCapAntialiasingRegionWidth = _05pixels;
+      wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
+      wm.StatisticsEnable = true;
+      wm.LineStippleEnable = state->line_stipple_enable;
+      wm.PolygonStippleEnable = state->poly_stipple_enable;
+      // wm.BarycentricInterpolationMode = <comes from FS program> :(
+      // wm.EarlyDepthStencilControl = <comes from FS program> :(
+   }
+
+   return cso;
+}
+
+static uint32_t
+translate_wrap(unsigned pipe_wrap)
+{
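+   /* PIPE_TEX_WRAP_CLAMP is legacy GL_CLAMP: coordinates clamp to [0, 1]
+    * rather than to the edge texel centers, so linear filtering at the
+    * edge blends in the border color - TCM_HALF_BORDER's behavior.
+    */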
+   static const unsigned map[] = {
+      [PIPE_TEX_WRAP_REPEAT]                 = TCM_WRAP,
+      [PIPE_TEX_WRAP_CLAMP]                  = TCM_HALF_BORDER,
+      [PIPE_TEX_WRAP_CLAMP_TO_EDGE]          = TCM_CLAMP,
+      [PIPE_TEX_WRAP_CLAMP_TO_BORDER]        = TCM_CLAMP_BORDER,
+      [PIPE_TEX_WRAP_MIRROR_REPEAT]          = TCM_MIRROR,
+      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE]   = TCM_MIRROR_ONCE,
+      [PIPE_TEX_WRAP_MIRROR_CLAMP]           = -1, // XXX: ???
+      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1, // XXX: ???
+   };
+   return map[pipe_wrap];
+}
+
+/**
+ * Return true if the given wrap mode requires the border color to exist.
+ */
+static bool
+wrap_mode_needs_border_color(unsigned wrap_mode)
+{
+   return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
+}
+
+static unsigned
+translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
+{
+   static const unsigned map[] = {
+      [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
+      [PIPE_TEX_MIPFILTER_LINEAR]  = MIPFILTER_LINEAR,
+      [PIPE_TEX_MIPFILTER_NONE]    = MIPFILTER_NONE,
+   };
+   return map[pipe_mip];
+}
+
+struct iris_sampler_state {
+   struct pipe_sampler_state base;
+
+   bool needs_border_color;
+
+   uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
+};
+
+static void *
+iris_create_sampler_state(struct pipe_context *pctx,
+                          const struct pipe_sampler_state *state)
+{
+   struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
+
+   if (!cso)
+      return NULL;
+
+   STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
+   STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
+
+   unsigned wrap_s = translate_wrap(state->wrap_s);
+   unsigned wrap_t = translate_wrap(state->wrap_t);
+   unsigned wrap_r = translate_wrap(state->wrap_r);
+
+   cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
+                             wrap_mode_needs_border_color(wrap_t) ||
+                             wrap_mode_needs_border_color(wrap_r);
+
+   iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
+      samp.TCXAddressControlMode = wrap_s;
+      samp.TCYAddressControlMode = wrap_t;
+      samp.TCZAddressControlMode = wrap_r;
+      samp.CubeSurfaceControlMode = state->seamless_cube_map;
+      samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
+      samp.MinModeFilter = state->min_img_filter;
+      samp.MagModeFilter = state->mag_img_filter;
+      samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
+      samp.MaximumAnisotropy = RATIO21;
+
+      if (state->max_anisotropy >= 2) {
+         if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
+            samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
+            samp.AnisotropicAlgorithm = EWAApproximation;
+         }
+
+         if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
+            samp.MagModeFilter = MAPFILTER_ANISOTROPIC;
+
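+         /* The hardware ratio encodings count by two: RATIO21 is 0 (2:1),
+          * RATIO41 is 1, and so on up to RATIO161 = 7 (16:1), hence
+          * (max_anisotropy - 2) / 2, e.g. 16 -> 7 == RATIO161.
+          */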
+         samp.MaximumAnisotropy =
+            MIN2((state->max_anisotropy - 2) / 2, RATIO161);
+      }
+
+      /* Set address rounding bits if not using nearest filtering. */
+      if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
+         samp.UAddressMinFilterRoundingEnable = true;
+         samp.VAddressMinFilterRoundingEnable = true;
+         samp.RAddressMinFilterRoundingEnable = true;
+      }
+
+      if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
+         samp.UAddressMagFilterRoundingEnable = true;
+         samp.VAddressMagFilterRoundingEnable = true;
+         samp.RAddressMagFilterRoundingEnable = true;
+      }
+
+      if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
+         samp.ShadowFunction = translate_shadow_func(state->compare_func);
+
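+      /* A 16384x16384 texture has miplevels 0..14, so Gen7+ clamps LOD
+       * to 14; older hardware (8K maximum) clamps to 13.
+       */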
+      const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;
+
+      samp.LODPreClampMode = CLAMP_MODE_OGL;
+      samp.MinLOD = CLAMP(state->min_lod, 0, hw_max_lod);
+      samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
+      samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
+
+      //samp.BorderColorPointer = <<comes from elsewhere>>
+   }
+
+   return cso;
+}
+
+static struct pipe_sampler_view *
+iris_create_sampler_view(struct pipe_context *ctx,
+                         struct pipe_resource *texture,
+                         const struct pipe_sampler_view *state)
+{
+   struct pipe_sampler_view *sampler_view = CALLOC_STRUCT(pipe_sampler_view);
+
+   if (!sampler_view)
+      return NULL;
+
+   /* initialize base object */
+   *sampler_view = *state;
+   sampler_view->texture = NULL;
+   pipe_resource_reference(&sampler_view->texture, texture);
+   pipe_reference_init(&sampler_view->reference, 1);
+   sampler_view->context = ctx;
+   return sampler_view;
+}
+
+static struct pipe_surface *
+iris_create_surface(struct pipe_context *ctx,
+                    struct pipe_resource *tex,
+                    const struct pipe_surface *surf_tmpl)
+{
+   struct pipe_surface *surface = CALLOC_STRUCT(pipe_surface);
+
+   if (!surface)
+      return NULL;
+
+   pipe_reference_init(&surface->reference, 1);
+   pipe_resource_reference(&surface->texture, tex);
+   surface->context = ctx;
+   surface->format = surf_tmpl->format;
+   surface->width = u_minify(tex->width0, surf_tmpl->u.tex.level);
+   surface->height = u_minify(tex->height0, surf_tmpl->u.tex.level);
+   surface->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
+   surface->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
+   surface->u.tex.level = surf_tmpl->u.tex.level;
+
+   return surface;
+}
+
+static void
+iris_set_sampler_views(struct pipe_context *ctx,
+                       enum pipe_shader_type shader,
+                       unsigned start, unsigned count,
+                       struct pipe_sampler_view **views)
+{
+}
+
+static void
+iris_bind_sampler_states(struct pipe_context *ctx,
+                         enum pipe_shader_type shader,
+                         unsigned start, unsigned count,
+                         void **states)
+{
+}
+
+static void
+iris_set_clip_state(struct pipe_context *ctx,
+                    const struct pipe_clip_state *state)
+{
+}
+
+static void
+iris_set_polygon_stipple(struct pipe_context *ctx,
+                         const struct pipe_poly_stipple *state)
+{
+   struct iris_context *ice = (struct iris_context *) ctx;
+   memcpy(&ice->state.poly_stipple, state, sizeof(*state));
+   ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
+}
+
+static void
+iris_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
+{
+}
+
+static void
+iris_set_scissor_states(struct pipe_context *ctx,
+                        unsigned start_slot,
+                        unsigned num_scissors,
+                        const struct pipe_scissor_state *state)
+{
+   struct iris_context *ice = (struct iris_context *) ctx;
+
+   for (unsigned i = 0; i < num_scissors; i++) {
+      ice->state.scissors[start_slot + i] = state[i];
+   }
+
+   ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
+}
+
+static void
+iris_set_stencil_ref(struct pipe_context *ctx,
+                     const struct pipe_stencil_ref *state)
+{
+   struct iris_context *ice = (struct iris_context *) ctx;
+   memcpy(&ice->state.stencil_ref, state, sizeof(*state));
+   ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
+}
+
+static void
+iris_set_viewport_states(struct pipe_context *ctx,
+                         unsigned start_slot,
+                         unsigned num_viewports,
+                         const struct pipe_viewport_state *state)
+{
+}
+
+static void
+iris_set_framebuffer_state(struct pipe_context *ctx,
+                           const struct pipe_framebuffer_state *state)
+{
+}
+
+static void
+iris_set_constant_buffer(struct pipe_context *ctx,
+                         enum pipe_shader_type shader, uint index,
+                         const struct pipe_constant_buffer *cb)
+{
+}
+
+
+static void
+iris_sampler_view_destroy(struct pipe_context *ctx,
+                          struct pipe_sampler_view *state)
+{
+   pipe_resource_reference(&state->texture, NULL);
+   free(state);
+}
+
+
+static void
+iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *surface)
+{
+   pipe_resource_reference(&surface->texture, NULL);
+   free(surface);
+}
+
+static void
+iris_bind_state(struct pipe_context *ctx, void *state)
+{
+}
+
+static void
+iris_delete_state(struct pipe_context *ctx, void *state)
+{
+   free(state);
+}
+
+struct iris_vertex_buffer_state {
+   uint32_t vertex_buffers[1 + 33 * GENX(VERTEX_BUFFER_STATE_length)];
+   unsigned length; /* DWordLength of 3DSTATE_VERTEX_BUFFERS (biased by 2) */
+};
+
+static void
+iris_set_vertex_buffers(struct pipe_context *ctx,
+                        unsigned start_slot, unsigned count,
+                        const struct pipe_vertex_buffer *buffers)
+{
+   /* If there are no buffers, do nothing.  We can leave the stale
+    * 3DSTATE_VERTEX_BUFFERS in place - as long as there are no vertex
+    * elements that point to them, it should be fine.
+    */
+   if (!buffers)
+      return;
+
+   struct iris_vertex_buffer_state *cso =
+      malloc(sizeof(struct iris_vertex_buffer_state));
+
+   /* 1 header DWord plus 4 DWords per buffer, with the usual bias of 2:
+    * (1 + 4 * count) - 2 == 4 * count - 1.
+    */
+   cso->length = 4 * count - 1;
+
+   iris_pack_state(GENX(3DSTATE_VERTEX_BUFFERS), cso->vertex_buffers, vb) {
+      vb.DWordLength = cso->length;
+   }
+
+   uint32_t *vb_pack_dest = &cso->vertex_buffers[1];
+
+   for (unsigned i = 0; i < count; i++) {
+      assert(!buffers[i].is_user_buffer);
+
+      iris_pack_state(GENX(VERTEX_BUFFER_STATE), vb_pack_dest, vb) {
+         vb.VertexBufferIndex = start_slot + i;
+         vb.MOCS = MOCS_WB;
+         vb.AddressModifyEnable = true;
+         vb.BufferPitch = buffers[i].stride;
+         //vb.BufferStartingAddress = ro_bo(bo, buffers[i].buffer_offset);
+         //vb.BufferSize = bo->size;
+      }
+
+      vb_pack_dest += GENX(VERTEX_BUFFER_STATE_length);
+   }
+
+   /* XXX: actually do something with this! */
+}
+
+struct iris_vertex_element_state {
+   uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
+   uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
+   unsigned count;
+};
+
+static void *
+iris_create_vertex_elements(struct pipe_context *ctx,
+                            unsigned count,
+                            const struct pipe_vertex_element *state)
+{
+   struct iris_vertex_element_state *cso =
+      malloc(sizeof(struct iris_vertex_element_state));
+
+   cso->count = count;
+
+   /* TODO:
+    *  - create edge flag one
+    *  - create SGV ones
+    *  - if those are necessary, use count + 1/2/3... OR in the length
+    */
+   iris_pack_state(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
+      /* 1 header DWord plus 2 DWords per element, with the usual bias of 2. */
+      ve.DWordLength = 1 + count * GENX(VERTEX_ELEMENT_STATE_length) - 2;
+   }
+
+   uint32_t *ve_pack_dest = &cso->vertex_elements[1];
+   uint32_t *vfi_pack_dest = cso->vf_instancing;
+
+   for (unsigned i = 0; i < count; i++) {
+      iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
+         ve.VertexBufferIndex = state[i].vertex_buffer_index;
+         ve.Valid = true;
+         ve.SourceElementOffset = state[i].src_offset;
+         ve.SourceElementFormat =
+            iris_isl_format_for_pipe_format(state[i].src_format);
+      }
+
+      /* Pack each element's instancing state into its own slot; packing
+       * them all into one would keep only the last element's state.
+       */
+      iris_pack_state(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
+         vi.VertexElementIndex = i;
+         vi.InstancingEnable = state[i].instance_divisor > 0;
+         vi.InstanceDataStepRate = state[i].instance_divisor;
+      }
+
+      ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
+      vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
+   }
+
+   return cso;
+}
+
+static void *
+iris_create_compute_state(struct pipe_context *ctx,
+                          const struct pipe_compute_state *state)
+{
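+   /* Placeholder: compute state isn't implemented yet; hand back a dummy
+    * allocation so callers get a non-NULL CSO.
+    */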
+   return malloc(1);
+}
+
+static struct pipe_stream_output_target *
+iris_create_stream_output_target(struct pipe_context *ctx,
+                                 struct pipe_resource *res,
+                                 unsigned buffer_offset,
+                                 unsigned buffer_size)
+{
+   struct pipe_stream_output_target *t =
+      CALLOC_STRUCT(pipe_stream_output_target);
+   if (!t)
+      return NULL;
+
+   pipe_reference_init(&t->reference, 1);
+   pipe_resource_reference(&t->buffer, res);
+   t->buffer_offset = buffer_offset;
+   t->buffer_size = buffer_size;
+   return t;
+}
+
+static void
+iris_stream_output_target_destroy(struct pipe_context *ctx,
+                                  struct pipe_stream_output_target *t)
+{
+   pipe_resource_reference(&t->buffer, NULL);
+   free(t);
+}
+
+static void
+iris_set_stream_output_targets(struct pipe_context *ctx,
+                               unsigned num_targets,
+                               struct pipe_stream_output_target **targets,
+                               const unsigned *offsets)
+{
+}
+
+void
+iris_init_state_functions(struct pipe_context *ctx)
+{
+   ctx->create_blend_state = iris_create_blend_state;
+   ctx->create_depth_stencil_alpha_state = iris_create_dsa_state;
+   ctx->create_rasterizer_state = iris_create_rasterizer_state;
+   ctx->create_sampler_state = iris_create_sampler_state;
+   ctx->create_sampler_view = iris_create_sampler_view;
+   ctx->create_surface = iris_create_surface;
+   ctx->create_vertex_elements_state = iris_create_vertex_elements;
+   ctx->create_compute_state = iris_create_compute_state;
+   ctx->bind_blend_state = iris_bind_state;
+   ctx->bind_depth_stencil_alpha_state = iris_bind_state;
+   ctx->bind_sampler_states = iris_bind_sampler_states;
+   ctx->bind_fs_state = iris_bind_state;
+   ctx->bind_rasterizer_state = iris_bind_state;
+   ctx->bind_vertex_elements_state = iris_bind_state;
+   ctx->bind_compute_state = iris_bind_state;
+   ctx->bind_tcs_state = iris_bind_state;
+   ctx->bind_tes_state = iris_bind_state;
+   ctx->bind_gs_state = iris_bind_state;
+   ctx->bind_vs_state = iris_bind_state;
+   ctx->delete_blend_state = iris_delete_state;
+   ctx->delete_depth_stencil_alpha_state = iris_delete_state;
+   ctx->delete_fs_state = iris_delete_state;
+   ctx->delete_rasterizer_state = iris_delete_state;
+   ctx->delete_sampler_state = iris_delete_state;
+   ctx->delete_vertex_elements_state = iris_delete_state;
+   ctx->delete_compute_state = iris_delete_state;
+   ctx->delete_tcs_state = iris_delete_state;
+   ctx->delete_tes_state = iris_delete_state;
+   ctx->delete_gs_state = iris_delete_state;
+   ctx->delete_vs_state = iris_delete_state;
+   ctx->set_blend_color = iris_set_blend_color;
+   ctx->set_clip_state = iris_set_clip_state;
+   ctx->set_constant_buffer = iris_set_constant_buffer;
+   ctx->set_sampler_views = iris_set_sampler_views;
+   ctx->set_framebuffer_state = iris_set_framebuffer_state;
+   ctx->set_polygon_stipple = iris_set_polygon_stipple;
+   ctx->set_sample_mask = iris_set_sample_mask;
+   ctx->set_scissor_states = iris_set_scissor_states;
+   ctx->set_stencil_ref = iris_set_stencil_ref;
+   ctx->set_vertex_buffers = iris_set_vertex_buffers;
+   ctx->set_viewport_states = iris_set_viewport_states;
+   ctx->sampler_view_destroy = iris_sampler_view_destroy;
+   ctx->surface_destroy = iris_surface_destroy;
+   ctx->draw_vbo = iris_draw_vbo;
+   ctx->launch_grid = iris_launch_grid;
+   ctx->create_stream_output_target = iris_create_stream_output_target;
+   ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
+   ctx->set_stream_output_targets = iris_set_stream_output_targets;
+}
diff --git a/src/gallium/drivers/iris/meson.build b/src/gallium/drivers/iris/meson.build
new file mode 100644 (file)
index 0000000..4728bad
--- /dev/null
@@ -0,0 +1,68 @@
+# Copyright © 2017 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+files_libiris = files(
+  'iris_batch.c',
+  'iris_batch.h',
+  'iris_bufmgr.c',
+  'iris_bufmgr.h',
+  'iris_context.h',
+  'iris_formats.c',
+  'iris_pipe.c',
+  'iris_program.c',
+  'iris_resource.c',
+  'iris_resource.h',
+  'iris_screen.c',
+  'iris_screen.h',
+)
+
+iris_gen_libs = []
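+# iris_state.c emits generation-specific commands, so compile it once per
+# supported generation; GEN_VERSIONx10 selects the genxml pack code
+# (90 = Gen9, 100 = Gen10).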
+foreach v : ['90', '100']
+  _lib = static_library(
+    'libiris_gen@0@'.format(v),
+    ['iris_state.c', gen_xml_pack],
+    include_directories : [inc_common, inc_intel],
+    c_args : [c_vis_args, no_override_init_args,
+              '-DGEN_VERSIONx10=@0@'.format(v)],
+    dependencies : [dep_libdrm],
+  )
+  iris_gen_libs += _lib
+endforeach
+
+libiris = static_library(
+  'iris',
+  [files_libiris, gen_xml_pack, nir_opcodes_h, nir_builder_opcodes_h],
+  include_directories : [
+    inc_src, inc_include, inc_gallium, inc_gallium_aux, inc_intel, inc_nir,
+    inc_gallium_drivers,
+    # these should not be necessary, but main/macros.h...
+    inc_mesa, inc_mapi
+  ],
+  c_args : [c_vis_args],
+  cpp_args : [cpp_vis_args],
+  dependencies : [dep_libdrm, dep_valgrind],
+  link_with : [iris_gen_libs, libintel_common, libintel_compiler, libintel_dev,
+               libisl, libblorp],
+)
+
+driver_iris = declare_dependency(
+  compile_args : '-DGALLIUM_IRIS',
+  link_with : [libiris, libiriswinsys],
+)
index 1626cd524fecafd3c8c7bc5346f00740fd303972..ad685632a028747f19b87c6c8c6311fc2ce09700 100644 (file)
@@ -118,6 +118,12 @@ if with_gallium_tegra
 else
   driver_tegra = declare_dependency()
 endif
+if with_gallium_iris
+  subdir('winsys/iris/drm')
+  subdir('drivers/iris')
+else
+  driver_iris = declare_dependency()
+endif
 if with_gallium_i915
   subdir('winsys/i915/drm')
   subdir('drivers/i915')
index 43f7ac2c22cd20f1aebf5ba394b2bfc1aedb3f4e..045b8111592fd3e8c0e792777b7a14b04ea4df74 100644 (file)
@@ -58,7 +58,7 @@ libgallium_dri = shared_library(
     driver_swrast, driver_r300, driver_r600, driver_radeonsi, driver_nouveau,
     driver_kmsro, driver_v3d, driver_vc4, driver_freedreno, driver_etnaviv,
     driver_tegra, driver_i915, driver_svga, driver_virgl,
-    driver_swr, driver_panfrost
+    driver_swr, driver_panfrost, driver_iris
   ],
 )
 
@@ -85,6 +85,7 @@ foreach d : [[with_gallium_kmsro, [
              [with_gallium_panfrost, 'panfrost_dri.so'],
              [with_gallium_etnaviv, 'etnaviv_dri.so'],
              [with_gallium_tegra, 'tegra_dri.so'],
+             [with_gallium_iris, 'iris_dri.so'],
              [with_gallium_i915, 'i915_dri.so'],
              [with_gallium_r300, 'r300_dri.so'],
              [with_gallium_r600, 'r600_dri.so'],
index 2d83d88324f8c22d14e79d609b16414be85b54f3..e5baf754487c754ad4594b62cf9ecbd33aee69b6 100644 (file)
@@ -38,8 +38,8 @@ PUBLIC const __DRIextension **__driDriverGetExtensions_kms_swrast(void)
 DEFINE_LOADER_DRM_ENTRYPOINT(i915)
 #endif
 
-#if defined(GALLIUM_ILO)
-DEFINE_LOADER_DRM_ENTRYPOINT(i965)
+#if defined(GALLIUM_IRIS)
+DEFINE_LOADER_DRM_ENTRYPOINT(iris)
 #endif
 
 #if defined(GALLIUM_NOUVEAU)
diff --git a/src/gallium/winsys/iris/drm/iris_drm_public.h b/src/gallium/winsys/iris/drm/iris_drm_public.h
new file mode 100644 (file)
index 0000000..17c0b2e
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef IRIS_DRM_PUBLIC_H
+#define IRIS_DRM_PUBLIC_H
+
+struct pipe_screen;
+
+struct pipe_screen *iris_drm_screen_create(int drm_fd);
+
+#endif /* IRIS_DRM_PUBLIC_H */
diff --git a/src/gallium/winsys/iris/drm/iris_drm_winsys.c b/src/gallium/winsys/iris/drm/iris_drm_winsys.c
new file mode 100644 (file)
index 0000000..49c7c0f
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "iris_drm_public.h"
+#include "iris/iris_screen.h"
+
+struct pipe_screen *
+iris_drm_screen_create(int fd)
+{
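+   /* Duplicate the fd (CLOEXEC, numbered >= 3 to stay clear of the stdio
+    * slots) so the screen holds its own reference, independent of the
+    * caller's.
+    */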
+   return iris_screen_create(fcntl(fd, F_DUPFD_CLOEXEC, 3));
+}
diff --git a/src/gallium/winsys/iris/drm/meson.build b/src/gallium/winsys/iris/drm/meson.build
new file mode 100644 (file)
index 0000000..3f8e2a8
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright © 2017 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+libiriswinsys = static_library(
+  'iriswinsys',
+  files('iris_drm_winsys.c'),
+  include_directories : [
+    inc_src, inc_include,
+    inc_gallium, inc_gallium_aux, inc_gallium_drivers,
+  ],
+  c_args : [c_vis_args],
+)
index c582aae709bdac76e3d378867a8fc356337151c2..99fe4ad216191140dac77b7b7c1034f6adc3c3a5 100644 (file)
@@ -23,6 +23,14 @@ static const int i965_chip_ids[] = {
 #undef CHIPSET
 };
 
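+/* Defining IRIS makes i965_pci_ids.h hide its pre-Gen8 entries, so this
+ * table only lists hardware the iris driver supports.
+ */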
+static const int iris_chip_ids[] = {
+#define CHIPSET(chip, family, name) chip,
+#define IRIS 1
+#include "pci_ids/i965_pci_ids.h"
+#undef IRIS
+#undef CHIPSET
+};
+
 static const int r100_chip_ids[] = {
 #define CHIPSET(chip, name, family) chip,
 #include "pci_ids/radeon_pci_ids.h"
@@ -76,6 +84,7 @@ static const struct {
 } driver_map[] = {
    { 0x8086, "i915", i915_chip_ids, ARRAY_SIZE(i915_chip_ids) },
    { 0x8086, "i965", i965_chip_ids, ARRAY_SIZE(i965_chip_ids) },
+   { 0x8086, "iris", iris_chip_ids, ARRAY_SIZE(iris_chip_ids) },
    { 0x1002, "radeon", r100_chip_ids, ARRAY_SIZE(r100_chip_ids) },
    { 0x1002, "r200", r200_chip_ids, ARRAY_SIZE(r200_chip_ids) },
    { 0x1002, "r300", r300_chip_ids, ARRAY_SIZE(r300_chip_ids) },
index d9fe8daa146d637b23e3dab694abe7911a66b065..60b0d64019303536e69a537b746293e422f99bda 100644 (file)
@@ -67,7 +67,7 @@ endif
 if with_gallium_freedreno
   subdir('freedreno')
 endif
-if with_dri_i965 or with_intel_vk
+if with_dri_i965 or with_intel_vk or with_gallium_iris
   subdir('intel')
 endif
 subdir('mesa')