radeon/r200/r300: cleanup some of the renderbuffer code
[mesa.git] / src / gallium / winsys / drm / intel / gem / intel_be_batchbuffer.c
1
2 #include "i915simple/i915_debug.h"
3 #include "intel_be_batchbuffer.h"
4 #include "intel_be_context.h"
5 #include "intel_be_device.h"
6 #include "intel_be_fence.h"
7 #include <errno.h>
8
9 #include "util/u_memory.h"
10
11 struct intel_be_batchbuffer *
12 intel_be_batchbuffer_alloc(struct intel_be_context *intel)
13 {
14 struct intel_be_batchbuffer *batch = CALLOC_STRUCT(intel_be_batchbuffer);
15
16
17 batch->base.buffer = NULL;
18 batch->base.winsys = &intel->base;
19 batch->base.map = NULL;
20 batch->base.ptr = NULL;
21 batch->base.size = 0;
22 batch->base.actual_size = intel->device->max_batch_size;
23 batch->base.relocs = 0;
24 batch->base.max_relocs = INTEL_DEFAULT_RELOCS;
25
26 batch->base.map = malloc(batch->base.actual_size);
27 memset(batch->base.map, 0, batch->base.actual_size);
28
29 batch->base.ptr = batch->base.map;
30
31 intel_be_batchbuffer_reset(batch);
32
33 return batch;
34 }
35
36 void
37 intel_be_batchbuffer_reset(struct intel_be_batchbuffer *batch)
38 {
39 struct intel_be_context *intel = intel_be_context(batch->base.winsys);
40 struct intel_be_device *dev = intel->device;
41
42 if (batch->bo)
43 drm_intel_bo_unreference(batch->bo);
44
45 memset(batch->base.map, 0, batch->base.actual_size);
46 batch->base.ptr = batch->base.map;
47 batch->base.size = batch->base.actual_size - BATCH_RESERVED;
48
49 batch->base.relocs = 0;
50 batch->base.max_relocs = INTEL_DEFAULT_RELOCS;
51
52 batch->bo = drm_intel_bo_alloc(dev->pools.gem,
53 "gallium3d_batch_buffer",
54 batch->base.actual_size, 0);
55 }
56
/**
 * Emit a relocation for @bo at the current batch write position.
 *
 * Records the reloc with libdrm (so the kernel can patch the final
 * address at exec time) and writes the bo's presumed offset plus
 * @pre_add as a placeholder dword into the batch stream.
 *
 * Returns the result of drm_intel_bo_emit_reloc (0 on success).
 *
 * NOTE(review): the placeholder dword is written and base.ptr is
 * advanced even when emit_reloc fails, while base.relocs is only
 * incremented on success — presumably so the batch layout stays in
 * step with what the caller already emitted; confirm callers handle
 * a non-zero return.
 */
int
intel_be_offset_relocation(struct intel_be_batchbuffer *batch,
			   unsigned pre_add,
			   drm_intel_bo *bo,
			   uint32_t read_domains,
			   uint32_t write_domain)
{
	unsigned offset;
	int ret = 0;

	assert(batch->base.relocs < batch->base.max_relocs);

	/* Byte offset of the reloc dword within the batch. */
	offset = (unsigned)(batch->base.ptr - batch->base.map);

	ret = drm_intel_bo_emit_reloc(batch->bo, offset,
				      bo, pre_add,
				      read_domains,
				      write_domain);

	/* Presumed address; the kernel rewrites it if the bo moved. */
	((uint32_t*)batch->base.ptr)[0] = bo->offset + pre_add;
	batch->base.ptr += 4;

	if (!ret)
		batch->base.relocs++;

	return ret;
}
84
85 void
86 intel_be_batchbuffer_flush(struct intel_be_batchbuffer *batch,
87 struct intel_be_fence **fence)
88 {
89 struct i915_batchbuffer *i915 = &batch->base;
90 unsigned used = 0;
91 int ret = 0;
92
93 assert(i915_batchbuffer_space(i915) >= 0);
94
95 used = batch->base.ptr - batch->base.map;
96 assert((used & 3) == 0);
97
98 if (used & 4) {
99 i915_batchbuffer_dword(i915, (0x0<<29)|(0x4<<23)|(1<<0)); // MI_FLUSH | FLUSH_MAP_CACHE;
100 i915_batchbuffer_dword(i915, (0x0<<29)|(0x0<<23)); // MI_NOOP
101 i915_batchbuffer_dword(i915, (0x0<<29)|(0xA<<23)); // MI_BATCH_BUFFER_END;
102 } else {
103 i915_batchbuffer_dword(i915, (0x0<<29)|(0x4<<23)|(1<<0)); //MI_FLUSH | FLUSH_MAP_CACHE;
104 i915_batchbuffer_dword(i915, (0x0<<29)|(0xA<<23)); // MI_BATCH_BUFFER_END;
105 }
106
107 used = batch->base.ptr - batch->base.map;
108
109 drm_intel_bo_subdata(batch->bo, 0, used, batch->base.map);
110 ret = drm_intel_bo_exec(batch->bo, used, NULL, 0, 0);
111
112 assert(ret == 0);
113
114 intel_be_batchbuffer_reset(batch);
115
116 if (fence) {
117 if (*fence)
118 intel_be_fence_unreference(*fence);
119
120 (*fence) = CALLOC_STRUCT(intel_be_fence);
121 (*fence)->refcount = 1;
122 (*fence)->bo = NULL;
123 }
124 }
125
/**
 * Finish (wait for completion of) the batchbuffer.
 *
 * Intentionally a no-op in this winsys: nothing in this file tracks
 * outstanding execution to wait on here.
 */
void
intel_be_batchbuffer_finish(struct intel_be_batchbuffer *batch)
{

}
131
132 void
133 intel_be_batchbuffer_free(struct intel_be_batchbuffer *batch)
134 {
135 if (batch->bo)
136 drm_intel_bo_unreference(batch->bo);
137
138 free(batch->base.map);
139 free(batch);
140 }