/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_fbo.h"

#include "brw_context.h"
#include "brw_state.h"

#include "blorp/blorp_genX_exec.h"

#include "brw_blorp.h"
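
/* The static functions below implement the driver hooks consumed by
 * blorp/blorp_genX_exec.h, translating blorp's generic batch and state
 * allocation requests into i965 batchbuffer operations.
 */

/* Reserve space for n dwords of blorp commands in the batch and return a
 * pointer for blorp to write them through.
 */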
static void *
blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   intel_batchbuffer_begin(brw, n, RENDER_RING);
   uint32_t *map = brw->batch.map_next;
   brw->batch.map_next += n;
   intel_batchbuffer_advance(brw);
   return map;
}
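
/* Record a relocation for a buffer address at the given location in the
 * batch, returning the presumed address value for blorp to write into the
 * command stream.
 */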
static uint64_t
blorp_emit_reloc(struct blorp_batch *batch,
                 void *location, struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t offset = (char *)location - (char *)brw->batch.map;
   if (GEN_GEN >= 8) {
      return intel_batchbuffer_reloc64(&brw->batch, address.buffer, offset,
                                       address.read_domains,
                                       address.write_domain,
                                       address.offset + delta);
   } else {
      return intel_batchbuffer_reloc(&brw->batch, address.buffer, offset,
                                     address.read_domains,
                                     address.write_domain,
                                     address.offset + delta);
   }
}
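
/* Record a relocation for a pointer embedded in surface state.  Besides
 * registering the relocation with libdrm, we write the presumed address into
 * the surface state ourselves: 64 bits wide on gen8+, 32 bits before.
 */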
static void
blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                    struct blorp_address address, uint32_t delta)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   drm_intel_bo *bo = address.buffer;

   drm_intel_bo_emit_reloc(brw->batch.bo, ss_offset,
                           bo, address.offset + delta,
                           address.read_domains, address.write_domain);

   uint64_t reloc_val = bo->offset64 + address.offset + delta;
   void *reloc_ptr = (void *)brw->batch.map + ss_offset;
#if GEN_GEN >= 8
   *(uint64_t *)reloc_ptr = reloc_val;
#else
   *(uint32_t *)reloc_ptr = reloc_val;
#endif
}
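
/* Allocate dynamic state space for blorp out of the batch's state area. */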
static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
                          enum aub_state_struct_type type,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   return brw_state_batch(brw, type, size, alignment, offset);
}
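
/* Allocate a binding table plus one surface state per entry out of the
 * batch's state area, returning CPU maps so blorp can fill in the surface
 * states itself.
 */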
static void
blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
                          unsigned state_size, unsigned state_alignment,
                          uint32_t *bt_offset, uint32_t *surface_offsets,
                          void **surface_maps)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t *bt_map = brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
                                      num_entries * sizeof(uint32_t), 32,
                                      bt_offset);

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                        state_size, state_alignment,
                                        &(surface_offsets)[i]);
      bt_map[i] = surface_offsets[i];
   }
}
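
/* Allocate vertex data space out of the batch's state area and describe its
 * location to blorp through *addr.
 */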
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
                          struct blorp_address *addr)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

   uint32_t offset;
   void *data = brw_state_batch(brw, AUB_TRACE_VERTEX_BUFFER,
                                size, 32, &offset);

   *addr = (struct blorp_address) {
      .buffer = brw->batch.bo,
      .read_domains = I915_GEM_DOMAIN_VERTEX,
      .write_domain = 0,
      .offset = offset,
   };

   return data;
}
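
/* Upload a URB configuration with VS entries large enough for blorp,
 * skipping the re-upload on gen7+ when the current configuration already
 * fits.
 */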
static void
blorp_emit_urb_config(struct blorp_batch *batch, unsigned vs_entry_size)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;

#if GEN_GEN >= 7
   if (!(brw->ctx.NewDriverState & (BRW_NEW_CONTEXT | BRW_NEW_URB_SIZE)) &&
       brw->urb.vsize >= vs_entry_size)
      return;

   brw->ctx.NewDriverState |= BRW_NEW_URB_SIZE;

   gen7_upload_urb(brw, vs_entry_size, false, false);
#else
   gen6_upload_urb(brw, vs_entry_size, false, 0);
#endif
}
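
/* Execute a blorp operation: flush caches, save the batchbuffer state, emit
 * blorp's commands, and flag the GL state tracking so that normal 3D
 * rendering re-emits everything blorp clobbered.
 */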
void
genX(blorp_exec)(struct blorp_batch *batch,
                 const struct blorp_params *params)
{
   assert(batch->blorp->driver_ctx == batch->driver_batch);
   struct brw_context *brw = batch->driver_batch;
   struct gl_context *ctx = &brw->ctx;
   const uint32_t estimated_max_batch_usage = GEN_GEN >= 8 ? 1800 : 1500;
   bool check_aperture_failed_once = false;

   /* Flush the sampler and render caches.  We definitely need to flush the
    * sampler cache so that we get updated contents from the render cache for
    * the glBlitFramebuffer() source.  Also, we are sometimes warned in the
    * docs to flush the cache between reinterpretations of the same surface
    * data with different formats, which blorp does for stencil and depth
    * data.
    */
   if (params->src.enabled)
      brw_render_cache_set_check_flush(brw, params->src.addr.buffer);
   brw_render_cache_set_check_flush(brw, params->dst.addr.buffer);

   brw_select_pipeline(brw, BRW_RENDER_PIPELINE);
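
   /* Save the batchbuffer state here so that, if the aperture check below
    * fails after emitting blorp's commands, we can rewind to this point and
    * retry with a fresh batch.
    */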
retry:
   intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
   intel_batchbuffer_save_state(brw);
   drm_intel_bo *saved_bo = brw->batch.bo;
   uint32_t saved_used = USED_BATCH(brw->batch);
   uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;

#if GEN_GEN == 6
   /* Emit workaround flushes when we switch from drawing to blorping. */
   brw_emit_post_sync_nonzero_flush(brw);
#endif

   brw_upload_state_base_address(brw);

#if GEN_GEN >= 7
   gen7_l3_state.emit(brw);
#endif

   if (brw->use_resource_streamer)
      gen7_disable_hw_binding_tables(brw);

   brw_emit_depth_stall_flushes(brw);

#if GEN_GEN == 8
   gen8_write_pma_stall_bits(brw, 0);
#endif

   blorp_emit(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = MAX2(params->x1, params->x0) - 1;
      rect.ClippedDrawingRectangleYMax = MAX2(params->y1, params->y0) - 1;
   }

   blorp_exec(batch, params);

   /* Make sure we didn't wrap the batch unintentionally, and make sure we
    * reserved enough space that a wrap will never happen.
    */
   assert(brw->batch.bo == saved_bo);
   assert((USED_BATCH(brw->batch) - saved_used) * 4 +
          (saved_state_batch_offset - brw->batch.state_batch_offset) <
          estimated_max_batch_usage);
   /* Shut up compiler warnings on release build */
   (void)saved_bo;
   (void)saved_used;
   (void)saved_state_batch_offset;

   /* Check if the blorp op we just did would make our batch likely to fail to
    * map all the BOs into the GPU at batch exec time later.  If so, flush the
    * batch and try again with nothing else in the batch.
    */
   if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
      if (!check_aperture_failed_once) {
         check_aperture_failed_once = true;
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: blorp emit exceeded available aperture space\n");
      }
   }

   if (unlikely(brw->always_flush_batch))
      intel_batchbuffer_flush(brw);

   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */
   brw->ctx.NewDriverState |= BRW_NEW_BLORP;
   brw->no_depth_or_stencil = false;

   if (params->dst.enabled)
      brw_render_cache_set_add_bo(brw, params->dst.addr.buffer);