i965: Delete vestiges of resource streamer code.
mesa.git: src/mesa/drivers/dri/i965/genX_blorp_exec.c
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_fbo.h"

#include "brw_context.h"
#include "brw_state.h"

#include "blorp/blorp_genX_exec.h"

#include "brw_blorp.h"

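/* Reserve space for n DWords at the end of the current render batch and
 * return a pointer for blorp to write its commands into.
 */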
static void *
blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   intel_batchbuffer_begin(brw, n, RENDER_RING);
   uint32_t *map = brw->batch.map_next;
   brw->batch.map_next += n;
   intel_batchbuffer_advance(brw);
   return map;
}

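/* Emit a relocation for a command packet field at the given location and
 * return the value to write into that field.  Gen8+ uses full 64-bit
 * addresses; earlier gens use 32-bit.
 */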
static uint64_t
blorp_emit_reloc(struct blorp_batch *batch,
                 void *location, struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t offset = (char *)location - (char *)brw->batch.map;
   if (brw->gen >= 8) {
      return intel_batchbuffer_reloc64(&brw->batch, address.buffer, offset,
                                       address.read_domains,
                                       address.write_domain,
                                       address.offset + delta);
   } else {
      return intel_batchbuffer_reloc(&brw->batch, address.buffer, offset,
                                     address.read_domains,
                                     address.write_domain,
                                     address.offset + delta);
   }
}

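/* Emit a relocation for an address embedded in a surface state object, and
 * write the buffer's presumed address into the state so the value is
 * already correct if the kernel does not end up moving the BO.
 */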
static void
blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                    struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   drm_intel_bo *bo = address.buffer;

   drm_intel_bo_emit_reloc(brw->batch.bo, ss_offset,
                           bo, address.offset + delta,
                           address.read_domains, address.write_domain);

   uint64_t reloc_val = bo->offset64 + address.offset + delta;
   void *reloc_ptr = (void *)brw->batch.map + ss_offset;
#if GEN_GEN >= 8
   *(uint64_t *)reloc_ptr = reloc_val;
#else
   *(uint32_t *)reloc_ptr = reloc_val;
#endif
}

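/* Allocate dynamic state (samplers, viewports, etc.) from the state space
 * at the end of the batchbuffer.
 */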
static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
                          enum aub_state_struct_type type,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   return brw_state_batch(brw, type, size, alignment, offset);
}

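/* Allocate a binding table plus num_entries surface states from the batch,
 * pointing each binding table entry at the corresponding surface state.
 */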
static void
blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
                          unsigned state_size, unsigned state_alignment,
                          uint32_t *bt_offset, uint32_t *surface_offsets,
                          void **surface_maps)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t *bt_map = brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
                                      num_entries * sizeof(uint32_t), 32,
                                      bt_offset);

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                        state_size, state_alignment,
                                        &surface_offsets[i]);
      bt_map[i] = surface_offsets[i];
   }
}

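/* Allocate space for blorp's vertex data in the batch and describe it with
 * a blorp_address so the core code can point 3DSTATE_VERTEX_BUFFERS at it.
 */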
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
                          struct blorp_address *addr)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t offset;
   void *data = brw_state_batch(brw, AUB_TRACE_VERTEX_BUFFER,
                                size, 32, &offset);

   *addr = (struct blorp_address) {
      .buffer = brw->batch.bo,
      .read_domains = I915_GEM_DOMAIN_VERTEX,
      .write_domain = 0,
      .offset = offset,
   };

   return data;
}

static void
blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
{
   /* All allocated states come from the batch which we will flush before we
    * submit it.  There's nothing for us to do here.
    */
}

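/* Make sure the URB has enough room for blorp's VS entries.  On Gen7+ the
 * re-upload can be skipped if the current configuration already fits.
 */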
static void
blorp_emit_urb_config(struct blorp_batch *batch, unsigned vs_entry_size)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

#if GEN_GEN >= 7
   if (!(brw->ctx.NewDriverState & (BRW_NEW_CONTEXT | BRW_NEW_URB_SIZE)) &&
       brw->urb.vsize >= vs_entry_size)
      return;

   brw->ctx.NewDriverState |= BRW_NEW_URB_SIZE;

   gen7_upload_urb(brw, vs_entry_size, false, false);
#else
   gen6_upload_urb(brw, vs_entry_size, false, 0);
#endif
}

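/* Driver entry point for a single blorp operation: save the batch state,
 * emit the blorp commands, and retry on a fresh batch if the result might
 * not fit in the aperture.
 */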
void
genX(blorp_exec)(struct blorp_batch *batch,
                 const struct blorp_params *params)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   struct gl_context *ctx = &brw->ctx;
   const uint32_t estimated_max_batch_usage = GEN_GEN >= 8 ? 1800 : 1500;
   bool check_aperture_failed_once = false;

   /* Flush the sampler and render caches.  We definitely need to flush the
    * sampler cache so that we get updated contents from the render cache for
    * the glBlitFramebuffer() source.  Also, we are sometimes warned in the
    * docs to flush the cache between reinterpretations of the same surface
    * data with different formats, which blorp does for stencil and depth
    * data.
    */
   if (params->src.enabled)
      brw_render_cache_set_check_flush(brw, params->src.addr.buffer);
   brw_render_cache_set_check_flush(brw, params->dst.addr.buffer);

   brw_select_pipeline(brw, BRW_RENDER_PIPELINE);

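   /* Reserve enough batch space up front that the whole blorp operation is
    * guaranteed to fit; if the aperture check below fails, roll back to the
    * saved state and retry once on a fresh batch.
    */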
retry:
   intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
   intel_batchbuffer_save_state(brw);
   drm_intel_bo *saved_bo = brw->batch.bo;
   uint32_t saved_used = USED_BATCH(brw->batch);
   uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;

#if GEN_GEN == 6
   /* Emit workaround flushes when we switch from drawing to blorping. */
   brw_emit_post_sync_nonzero_flush(brw);
#endif

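   /* Emit STATE_BASE_ADDRESS so that the dynamic and surface state offsets
    * allocated out of the batch below resolve to the right addresses.
    */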
   brw_upload_state_base_address(brw);

#if GEN_GEN >= 8
   gen7_l3_state.emit(brw);
#endif

   brw_emit_depth_stall_flushes(brw);

#if GEN_GEN == 8
   gen8_write_pma_stall_bits(brw, 0);
#endif

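   /* 3DSTATE_DRAWING_RECTANGLE clips all rendering, so open it up to cover
    * everything blorp is about to draw.
    */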
   blorp_emit(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = MAX2(params->x1, params->x0) - 1;
      rect.ClippedDrawingRectangleYMax = MAX2(params->y1, params->y0) - 1;
   }

   blorp_exec(batch, params);

   /* Make sure we didn't wrap the batch unintentionally, and make sure we
    * reserved enough space that a wrap will never happen.
    */
   assert(brw->batch.bo == saved_bo);
   assert((USED_BATCH(brw->batch) - saved_used) * 4 +
          (saved_state_batch_offset - brw->batch.state_batch_offset) <
          estimated_max_batch_usage);
   /* Shut up compiler warnings on release build */
   (void)saved_bo;
   (void)saved_used;
   (void)saved_state_batch_offset;

   /* Check if the blorp op we just did would make our batch likely to fail
    * to map all the BOs into the GPU at batch exec time later.  If so, flush
    * the batch and try again with nothing else in the batch.
    */
   if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
      if (!check_aperture_failed_once) {
         check_aperture_failed_once = true;
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: blorp emit exceeded available aperture space\n");
      }
   }

   if (unlikely(brw->always_flush_batch))
      intel_batchbuffer_flush(brw);

   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */
   brw->ctx.NewDriverState |= BRW_NEW_BLORP;
   brw->no_depth_or_stencil = false;
   brw->ib.type = -1;

   if (params->dst.enabled)
      brw_render_cache_set_add_bo(brw, params->dst.addr.buffer);
   if (params->depth.enabled)
      brw_render_cache_set_add_bo(brw, params->depth.addr.buffer);
   if (params->stencil.enabled)
      brw_render_cache_set_add_bo(brw, params->stencil.addr.buffer);
}