/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_fbo.h"

#include "brw_context.h"
#include "brw_state.h"

#include "blorp/blorp_genX_exec.h"

#if GEN_GEN <= 5
#include "gen4_blorp_exec.h"
#endif

#include "brw_blorp.h"
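
/* Callback for the blorp_genX_exec.h machinery included above: reserve space
 * for n dwords in the current render batch and return a pointer for the
 * blorp core to write its command packets into.
 */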
static void *
blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   intel_batchbuffer_begin(brw, n, RENDER_RING);
   uint32_t *map = brw->batch.map_next;
   brw->batch.map_next += n;
   intel_batchbuffer_advance(brw);
   return map;
}
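
/* Register a relocation for a location inside the batch and return the
 * presumed GPU address (plus delta) that should be written there.
 */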
static uint64_t
blorp_emit_reloc(struct blorp_batch *batch,
                 void *location, struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t offset = (char *)location - (char *)brw->batch.map;
   return brw_emit_reloc(&brw->batch, offset,
                         address.buffer, address.offset + delta,
                         address.read_domains,
                         address.write_domain);
}
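
/* Same idea as blorp_emit_reloc, but for surface-state entries: register the
 * relocation and write the presumed address into the surface state directly.
 */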
static void
blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                    struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   struct brw_bo *bo = address.buffer;

   brw_emit_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
                  address.read_domains, address.write_domain);

   /* Write the presumed GPU address of the BO into the surface state.
    * Addresses are 64 bits wide on gen8 and later.
    */
   uint64_t reloc_val = bo->offset64 + address.offset + delta;
   void *reloc_ptr = (void *)brw->batch.map + ss_offset;
#if GEN_GEN >= 8
   *(uint64_t *)reloc_ptr = reloc_val;
#else
   *(uint32_t *)reloc_ptr = reloc_val;
#endif
}
static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   return brw_state_batch(brw, size, alignment, offset);
}
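
/* Allocate a binding table plus one surface-state entry per binding, all out
 * of the batch, and point each binding-table slot at its surface state.
 */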
static void
blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
                          unsigned state_size, unsigned state_alignment,
                          uint32_t *bt_offset, uint32_t *surface_offsets,
                          void **surface_maps)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t *bt_map = brw_state_batch(brw,
                                      num_entries * sizeof(uint32_t), 32,
                                      bt_offset);

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = brw_state_batch(brw,
                                        state_size, state_alignment,
                                        &(surface_offsets)[i]);
      bt_map[i] = surface_offsets[i];
   }
}
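
/* Vertex data for blorp's rectangle primitive also lives in the batch BO; see
 * the PRM note below for why the allocation is 64-byte aligned.
 */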
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
                          struct blorp_address *addr)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   /* From the Skylake PRM, 3DSTATE_VERTEX_BUFFERS:
    *
    *    "The VF cache needs to be invalidated before binding and then using
    *    Vertex Buffers that overlap with any previously bound Vertex Buffer
    *    (at a 64B granularity) since the last invalidation.  A VF cache
    *    invalidate is performed by setting the "VF Cache Invalidation Enable"
    *    bit in PIPE_CONTROL."
    *
    * This restriction first appears in the Skylake PRM but the internal docs
    * also list it as being an issue on Broadwell.  In order to avoid this
    * problem, we align all vertex buffer allocations to 64 bytes.
    */
   uint32_t offset;
   void *data = brw_state_batch(brw, size, 64, &offset);

   *addr = (struct blorp_address) {
      .buffer = brw->batch.bo,
      .read_domains = I915_GEM_DOMAIN_VERTEX,
      .write_domain = 0,
      .offset = offset,
   };

   return data;
}
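
/* Some gen8+ hardware workarounds need a scratch address to write to; hand
 * blorp the driver's shared workaround BO for that purpose.
 */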
#if GEN_GEN >= 8
static struct blorp_address
blorp_get_workaround_page(struct blorp_batch *batch)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   return (struct blorp_address) {
      .buffer = brw->workaround_bo,
   };
}
#endif
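
/* CPU-cache flushing hook for blorp state; a no-op here, see the comment in
 * the body.
 */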
static void
blorp_flush_range(struct blorp_batch *batch, void *start, size_t size)
{
   /* All allocated states come from the batch which we will flush before we
    * submit it.  There's nothing for us to do here.
    */
}
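
/* Program the URB for blorp's simple pipeline.  On gen7+ the re-upload can be
 * skipped when the current URB configuration is already large enough.
 */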
static void
blorp_emit_urb_config(struct blorp_batch *batch,
                      unsigned vs_entry_size, unsigned sf_entry_size)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

#if GEN_GEN >= 7
   if (!(brw->ctx.NewDriverState & (BRW_NEW_CONTEXT | BRW_NEW_URB_SIZE)) &&
       brw->urb.vsize >= vs_entry_size)
      return;

   gen7_upload_urb(brw, vs_entry_size, false, false);
#elif GEN_GEN == 6
   gen6_upload_urb(brw, vs_entry_size, false, 0);
#else
   /* We calculate it now and emit later. */
   brw_calculate_urb_fence(brw, 0, vs_entry_size, sf_entry_size);
#endif
}
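
/* Top-level entry point: set up the GL-independent state blorp needs, run the
 * blorp operation, and then flag all the state we clobbered so that normal
 * drawing re-emits it.
 */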
void
genX(blorp_exec)(struct blorp_batch *batch,
                 const struct blorp_params *params)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   struct gl_context *ctx = &brw->ctx;
   const uint32_t estimated_max_batch_usage = GEN_GEN >= 8 ? 1920 : 1700;
   bool check_aperture_failed_once = false;

   /* Flush the sampler and render caches.  We definitely need to flush the
    * sampler cache so that we get updated contents from the render cache for
    * the glBlitFramebuffer() source.  Also, we are sometimes warned in the
    * docs to flush the cache between reinterpretations of the same surface
    * data with different formats, which blorp does for stencil and depth
    * data.
    */
   if (params->src.enabled)
      brw_render_cache_set_check_flush(brw, params->src.addr.buffer);
   brw_render_cache_set_check_flush(brw, params->dst.addr.buffer);

   brw_select_pipeline(brw, BRW_RENDER_PIPELINE);
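
   /* Save the batch state here so that, if the blorp op ends up overflowing
    * the aperture, we can roll back to this point and retry with an empty
    * batch.
    */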
retry:
   intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
   intel_batchbuffer_save_state(brw);
   struct brw_bo *saved_bo = brw->batch.bo;
   uint32_t saved_used = USED_BATCH(brw->batch);
   uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;

#if GEN_GEN == 6
   /* Emit workaround flushes when we switch from drawing to blorping. */
   brw_emit_post_sync_nonzero_flush(brw);
#endif

   brw_upload_state_base_address(brw);

#if GEN_GEN >= 7
   gen7_l3_state.emit(brw);
#endif

#if GEN_GEN >= 6
   brw_emit_depth_stall_flushes(brw);
#endif

#if GEN_GEN == 8
   gen8_write_pma_stall_bits(brw, 0);
#endif

   blorp_emit(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = MAX2(params->x1, params->x0) - 1;
      rect.ClippedDrawingRectangleYMax = MAX2(params->y1, params->y0) - 1;
   }

   blorp_exec(batch, params);
   /* Make sure we didn't wrap the batch unintentionally, and make sure we
    * reserved enough space that a wrap will never happen.
    */
   assert(brw->batch.bo == saved_bo);
   assert((USED_BATCH(brw->batch) - saved_used) * 4 +
          (saved_state_batch_offset - brw->batch.state_batch_offset) <
          estimated_max_batch_usage);
   /* Shut up compiler warnings on release build */
   (void)saved_bo;
   (void)saved_used;
   (void)saved_state_batch_offset;
   /* Check if the blorp op we just did would make our batch likely to fail to
    * map all the BOs into the GPU at batch exec time later.  If so, flush the
    * batch and try again with nothing else in the batch.
    */
   if (!brw_batch_has_aperture_space(brw, 0)) {
      if (!check_aperture_failed_once) {
         check_aperture_failed_once = true;
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: blorp emit exceeded available aperture space\n");
      }
   }

   if (unlikely(brw->always_flush_batch))
      intel_batchbuffer_flush(brw);
   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */
   brw->ctx.NewDriverState |= BRW_NEW_BLORP;
   brw->no_depth_or_stencil = !params->depth.enabled &&
                              !params->stencil.enabled;
   brw->ib.index_size = -1;

   if (params->dst.enabled)
      brw_render_cache_set_add_bo(brw, params->dst.addr.buffer);
   if (params->depth.enabled)
      brw_render_cache_set_add_bo(brw, params->depth.addr.buffer);
   if (params->stencil.enabled)
      brw_render_cache_set_add_bo(brw, params->stencil.addr.buffer);
}