intel: Add a non-working GEM backend for intel
[mesa.git] / src/gallium/winsys/drm/intel/gem/intel_be_batchbuffer.c
#include "intel_be_batchbuffer.h"
#include "intel_be_context.h"
#include "intel_be_device.h"
#include "intel_be_fence.h"

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include "util/u_memory.h"

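/*
 * Allocate a batchbuffer wrapper together with the CPU-side map that
 * commands are built into. The underlying GEM buffer object is (re)created
 * by intel_be_batchbuffer_reset().
 */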
struct intel_be_batchbuffer *
intel_be_batchbuffer_alloc(struct intel_be_context *intel)
{
   struct intel_be_batchbuffer *batch = CALLOC_STRUCT(intel_be_batchbuffer);

   batch->base.buffer = NULL;
   batch->base.winsys = &intel->base;
   batch->base.map = NULL;
   batch->base.ptr = NULL;
   batch->base.size = 0;
   batch->base.actual_size = intel->device->max_batch_size;
   batch->base.relocs = 0;
   batch->base.max_relocs = INTEL_DEFAULT_RELOCS;

   batch->base.map = malloc(batch->base.actual_size);
   memset(batch->base.map, 0, batch->base.actual_size);

   batch->base.ptr = batch->base.map;

   intel_be_batchbuffer_reset(batch);

   return batch;
}

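/*
 * Throw away any accumulated commands and relocations, clear the CPU-side
 * map and allocate a fresh GEM buffer object for the next batch.
 */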
void
intel_be_batchbuffer_reset(struct intel_be_batchbuffer *batch)
{
   struct intel_be_context *intel = intel_be_context(batch->base.winsys);
   struct intel_be_device *dev = intel->device;

   if (batch->bo)
      drm_intel_bo_unreference(batch->bo);

   memset(batch->base.map, 0, batch->base.actual_size);
   batch->base.ptr = batch->base.map;
   batch->base.size = batch->base.actual_size - BATCH_RESERVED;

   batch->base.relocs = 0;
   batch->base.max_relocs = INTEL_DEFAULT_RELOCS;

   batch->bo = drm_intel_bo_alloc(dev->pools.gem,
                                  "gallium3d_batch_buffer",
                                  batch->base.actual_size, 0);
}

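/*
 * Reserve a dword in the batch for a relocation to bo at the current write
 * pointer. The relocation entry itself is not emitted yet, since this
 * backend does not submit batches to the hardware (see the TODO below).
 */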
int
intel_be_offset_relocation(struct intel_be_batchbuffer *batch,
                           unsigned pre_add,
                           drm_intel_bo *bo,
                           uint32_t read_domains,
                           uint32_t write_domain)
{
   unsigned offset;
   int ret = 0;

   assert(batch->base.relocs < batch->base.max_relocs);

   /* Remember where in the batch this relocation lives and reserve the
    * dword that will later hold the presumed offset.
    */
   offset = (unsigned)(batch->base.ptr - batch->base.map);
   batch->base.ptr += 4;

   /*
    * TODO: Enable this when we submit batch buffers to HW.
    *
    * Note that drm_intel_bo_emit_reloc() takes the buffer containing the
    * relocation (the batch) first, then the target buffer and its delta:
    *
    * ret = drm_intel_bo_emit_reloc(batch->bo, offset,
    *                               bo, pre_add,
    *                               read_domains,
    *                               write_domain);
    */

   if (!ret)
      batch->base.relocs++;

   return ret;
}

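/*
 * Flush the batchbuffer. Submission to the hardware is still a TODO in this
 * backend, so this only resets the batch and, if requested, hands back a
 * placeholder fence with no buffer object attached.
 */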
void
intel_be_batchbuffer_flush(struct intel_be_batchbuffer *batch,
                           struct intel_be_fence **fence)
{
   struct i915_batchbuffer *i915 = &batch->base;

   assert(i915_batchbuffer_space(i915) >= 0);

   /* TODO: submit stuff to HW */

   intel_be_batchbuffer_reset(batch);

   if (fence) {
      if (*fence)
         intel_be_fence_unreference(*fence);

      (*fence) = CALLOC_STRUCT(intel_be_fence);
      (*fence)->refcount = 1;
      (*fence)->bo = NULL;
   }
}

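/*
 * Currently a no-op: batch submission and waiting are not implemented in
 * this backend yet.
 */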
void
intel_be_batchbuffer_finish(struct intel_be_batchbuffer *batch)
{
}

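/*
 * Release the GEM buffer object, the CPU-side map and the wrapper itself.
 */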
void
intel_be_batchbuffer_free(struct intel_be_batchbuffer *batch)
{
   if (batch->bo)
      drm_intel_bo_unreference(batch->bo);

   free(batch->base.map);
   free(batch);
}