/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * GenX specific code for working with BLORP (blitting, resolves, clears
 * on the 3D engine).  This provides the driver-specific hooks needed to
 * implement the BLORP API.
 *
 * See iris_blit.c, iris_clear.c, and so on.
 */

#include "iris_batch.h"
#include "iris_resource.h"
#include "iris_context.h"

#include "util/u_upload_mgr.h"
#include "intel/common/gen_l3_config.h"

#define BLORP_USE_SOFTPIN
#include "blorp/blorp_genX_exec.h"

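/* Stream some state data into one of the context's uploaders, pin the
 * backing BO in the batch, and return a CPU mapping of the new space.
 * This is the workhorse behind the blorp_alloc_* hooks below.
 */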
static void *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset,
             struct iris_bo **out_bo)
{
   struct pipe_resource *res = NULL;
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, &res, &ptr);

   struct iris_bo *bo = iris_resource_bo(res);
   iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

   iris_record_state_size(batch->state_sizes,
                          bo->gtt_offset + *out_offset, size);

   /* If the caller has asked for a BO, we leave them the responsibility of
    * adding bo->gtt_offset (say, by handing an address to genxml).  If not,
    * we assume they want the offset from a base address.
    */
   if (out_bo)
      *out_bo = bo;
   else
      *out_offset += iris_bo_offset_from_base_address(bo);

   pipe_resource_reference(&res, NULL);

   return ptr;
}

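/* Reserve space for BLORP's packets directly in the target batch's command
 * stream.
 */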
static void *
blorp_emit_dwords(struct blorp_batch *blorp_batch, unsigned n)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   return iris_get_command_space(batch, n * sizeof(uint32_t));
}

static uint64_t
combine_and_pin_address(struct blorp_batch *blorp_batch,
                        struct blorp_address addr)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo = addr.buffer;

   iris_use_pinned_bo(batch, bo, addr.reloc_flags & RELOC_WRITE,
                      IRIS_DOMAIN_NONE);

   /* Assume this is a general address, not relative to a base. */
   return bo->gtt_offset + addr.offset;
}

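/* With softpin, a "relocation" is just the BO's fixed virtual address plus
 * an offset; nothing is patched later, but the BO still has to be pinned
 * in this batch.
 */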
static uint64_t
blorp_emit_reloc(struct blorp_batch *blorp_batch, UNUSED void *location,
                 struct blorp_address addr, uint32_t delta)
{
   return combine_and_pin_address(blorp_batch, addr) + delta;
}

static void
blorp_surface_reloc(struct blorp_batch *blorp_batch, uint32_t ss_offset,
                    struct blorp_address addr, uint32_t delta)
{
   /* Let blorp_get_surface_address do the pinning. */
}

static uint64_t
blorp_get_surface_address(struct blorp_batch *blorp_batch,
                          struct blorp_address addr)
{
   return combine_and_pin_address(blorp_batch, addr);
}

UNUSED static struct blorp_address
blorp_get_surface_base_address(UNUSED struct blorp_batch *blorp_batch)
{
   return (struct blorp_address) { .offset = IRIS_MEMZONE_BINDER_START };
}

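/* Dynamic state is streamed through ice->state.dynamic_uploader, so the
 * offsets returned here are relative to Dynamic State Base Address.
 */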
static void *
blorp_alloc_dynamic_state(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          uint32_t alignment,
                          uint32_t *offset)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

   return stream_state(batch, ice->state.dynamic_uploader,
                       size, alignment, offset, NULL);
}

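/* Reserve a chunk of the binder for BLORP's binding table, stream each
 * surface state, and record binder-relative offsets in the table entries.
 */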
static void
blorp_alloc_binding_table(struct blorp_batch *blorp_batch,
                          unsigned num_entries,
                          unsigned state_size,
                          unsigned state_alignment,
                          uint32_t *bt_offset,
                          uint32_t *surface_offsets,
                          void **surface_maps)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_binder *binder = &ice->state.binder;
   struct iris_batch *batch = blorp_batch->driver_batch;

   *bt_offset = iris_binder_reserve(ice, num_entries * sizeof(uint32_t));
   uint32_t *bt_map = binder->map + *bt_offset;

   for (unsigned i = 0; i < num_entries; i++) {
      surface_maps[i] = stream_state(batch, ice->state.surface_uploader,
                                     state_size, state_alignment,
                                     &surface_offsets[i], NULL);
      bt_map[i] = surface_offsets[i] - (uint32_t) binder->bo->gtt_offset;
   }

   iris_use_pinned_bo(batch, binder->bo, false, IRIS_DOMAIN_NONE);

   batch->screen->vtbl.update_surface_base_address(batch, binder);
}

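/* Vertex data goes through the pipe context's stream uploader; BLORP gets
 * both a CPU map to fill and a GPU address to point the vertex buffer at.
 */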
static void *
blorp_alloc_vertex_buffer(struct blorp_batch *blorp_batch,
                          uint32_t size,
                          struct blorp_address *addr)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   struct iris_bo *bo;
   uint32_t offset;

   void *map = stream_state(batch, ice->ctx.stream_uploader, size, 64,
                            &offset, &bo);

   *addr = (struct blorp_address) {
      .buffer = bo,
      .offset = offset,
      .mocs = iris_mocs(bo, &batch->screen->isl_dev),
   };

   return map;
}

/**
 * See iris_upload_render_state's IRIS_DIRTY_VERTEX_BUFFERS handling for
 * a comment about why these VF invalidations are needed.
 */
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *blorp_batch,
                                           const struct blorp_address *addrs,
                                           UNUSED uint32_t *sizes,
                                           unsigned num_vbs)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;
   bool need_invalidate = false;

   for (unsigned i = 0; i < num_vbs; i++) {
      struct iris_bo *bo = addrs[i].buffer;
      uint16_t high_bits = bo->gtt_offset >> 32u;

      if (high_bits != ice->state.last_vbo_high_bits[i]) {
         need_invalidate = true;
         ice->state.last_vbo_high_bits[i] = high_bits;
      }
   }

   if (need_invalidate) {
      iris_emit_pipe_control_flush(batch,
                                   "workaround: VF cache 32-bit key [blorp]",
                                   PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                   PIPE_CONTROL_CS_STALL);
   }
}

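/* Hand BLORP the screen's scratch "workaround" BO, which it can use as a
 * harmless write target for certain PIPE_CONTROL workarounds.
 */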
static struct blorp_address
blorp_get_workaround_address(struct blorp_batch *blorp_batch)
{
   struct iris_batch *batch = blorp_batch->driver_batch;

   return (struct blorp_address) {
      .buffer = batch->screen->workaround_address.bo,
      .offset = batch->screen->workaround_address.offset,
   };
}

static void
blorp_flush_range(UNUSED struct blorp_batch *blorp_batch,
                  UNUSED void *start, UNUSED size_t size)
{
   /* All allocated states come from the batch which we will flush before we
    * submit it.  There's nothing for us to do here.
    */
}

static const struct gen_l3_config *
blorp_get_l3_config(struct blorp_batch *blorp_batch)
{
   struct iris_batch *batch = blorp_batch->driver_batch;
   return batch->screen->l3_config_3d;
}

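/* The main driver hook: flush caches as needed, emit per-gen workarounds,
 * run the BLORP operation, then flag the 3D state BLORP clobbered as dirty
 * so the next regular draw re-emits it.
 */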
static void
iris_blorp_exec(struct blorp_batch *blorp_batch,
                const struct blorp_params *params)
{
   struct iris_context *ice = blorp_batch->blorp->driver_ctx;
   struct iris_batch *batch = blorp_batch->driver_batch;

#if GEN_GEN >= 11
   /* The PIPE_CONTROL command description says:
    *
    *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *     Target Cache Flush by enabling this bit. When render target flush
    *     is set due to new association of BTI, PS Scoreboard Stall bit must
    *     be set in this packet."
    */
   iris_emit_pipe_control_flush(batch,
                                "workaround: RT BTI change [blorp]",
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_STALL_AT_SCOREBOARD);
#endif

   /* Flush the sampler and render caches.  We definitely need to flush the
    * sampler cache so that we get updated contents from the render cache for
    * the glBlitFramebuffer() source.  Also, we are sometimes warned in the
    * docs to flush the cache between reinterpretations of the same surface
    * data with different formats, which blorp does for stencil and depth
    * data.
    */
   if (params->src.enabled)
      iris_cache_flush_for_read(batch, params->src.addr.buffer);
   if (params->dst.enabled) {
      iris_cache_flush_for_render(batch, params->dst.addr.buffer,
                                  params->dst.view.format,
                                  params->dst.aux_usage);
   }
   if (params->depth.enabled)
      iris_cache_flush_for_depth(batch, params->depth.addr.buffer);
   if (params->stencil.enabled)
      iris_cache_flush_for_depth(batch, params->stencil.addr.buffer);

   iris_require_command_space(batch, 1400);

#if GEN_GEN == 8
   genX(update_pma_fix)(ice, batch, false);
#endif

   const unsigned scale = params->fast_clear_op ? UINT_MAX : 1;
   if (ice->state.current_hash_scale != scale) {
      genX(emit_hashing_mode)(ice, batch, params->x1 - params->x0,
                              params->y1 - params->y0, scale);
   }

#if GEN_GEN >= 12
   genX(invalidate_aux_map_state)(batch);
#endif

   iris_handle_always_flush_cache(batch);

   blorp_exec(blorp_batch, params);

   iris_handle_always_flush_cache(batch);

   /* We've smashed all state compared to what the normal 3D pipeline
    * rendering tracks for GL.
    */

   uint64_t skip_bits = (IRIS_DIRTY_POLYGON_STIPPLE |
                         IRIS_DIRTY_SO_BUFFERS |
                         IRIS_DIRTY_SO_DECL_LIST |
                         IRIS_DIRTY_LINE_STIPPLE |
                         IRIS_ALL_DIRTY_FOR_COMPUTE |
                         IRIS_DIRTY_SCISSOR_RECT |
                         IRIS_DIRTY_SF_CL_VIEWPORT);
   uint64_t skip_stage_bits = (IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE |
                               IRIS_STAGE_DIRTY_UNCOMPILED_VS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_TES |
                               IRIS_STAGE_DIRTY_UNCOMPILED_GS |
                               IRIS_STAGE_DIRTY_UNCOMPILED_FS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_VS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_TCS |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_TES |
                               IRIS_STAGE_DIRTY_SAMPLER_STATES_GS);

   if (!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL]) {
      /* BLORP disabled tessellation, that's fine for the next draw */
      skip_stage_bits |= IRIS_STAGE_DIRTY_TCS |
                         IRIS_STAGE_DIRTY_TES |
                         IRIS_STAGE_DIRTY_CONSTANTS_TCS |
                         IRIS_STAGE_DIRTY_CONSTANTS_TES |
                         IRIS_STAGE_DIRTY_BINDINGS_TCS |
                         IRIS_STAGE_DIRTY_BINDINGS_TES;
   }

   if (!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY]) {
      /* BLORP disabled geometry shaders, that's fine for the next draw */
      skip_stage_bits |= IRIS_STAGE_DIRTY_GS |
                         IRIS_STAGE_DIRTY_CONSTANTS_GS |
                         IRIS_STAGE_DIRTY_BINDINGS_GS;
   }

   /* We can skip flagging IRIS_DIRTY_DEPTH_BUFFER if
    * BLORP_BATCH_NO_EMIT_DEPTH_STENCIL is set.
    */
   if (blorp_batch->flags & BLORP_BATCH_NO_EMIT_DEPTH_STENCIL)
      skip_bits |= IRIS_DIRTY_DEPTH_BUFFER;

   if (!params->wm_prog_data)
      skip_bits |= IRIS_DIRTY_BLEND_STATE | IRIS_DIRTY_PS_BLEND;

   ice->state.dirty |= ~skip_bits;
   ice->state.stage_dirty |= ~skip_stage_bits;

   if (params->dst.enabled) {
      iris_render_cache_add_bo(batch, params->dst.addr.buffer,
                               params->dst.view.format,
                               params->dst.aux_usage);
   }
   if (params->depth.enabled)
      iris_depth_cache_add_bo(batch, params->depth.addr.buffer);
   if (params->stencil.enabled)
      iris_depth_cache_add_bo(batch, params->stencil.addr.buffer);

   if (params->src.enabled)
      iris_bo_bump_seqno(params->src.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_OTHER_READ);
   if (params->dst.enabled)
      iris_bo_bump_seqno(params->dst.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_RENDER_WRITE);
   if (params->depth.enabled)
      iris_bo_bump_seqno(params->depth.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_DEPTH_WRITE);
   if (params->stencil.enabled)
      iris_bo_bump_seqno(params->stencil.addr.buffer, batch->next_seqno,
                         IRIS_DOMAIN_DEPTH_WRITE);
}

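/* Wire up the per-generation BLORP function pointers for this context. */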
void
genX(init_blorp)(struct iris_context *ice)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;

   blorp_init(&ice->blorp, ice, &screen->isl_dev);
   ice->blorp.compiler = screen->compiler;
   ice->blorp.lookup_shader = iris_blorp_lookup_shader;
   ice->blorp.upload_shader = iris_blorp_upload_shader;
   ice->blorp.exec = iris_blorp_exec;
}