/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
26 #include "anv_private.h"
28 /* These are defined in anv_private.h and blorp_genX_exec.h */
29 #undef __gen_address_type
30 #undef __gen_user_data
31 #undef __gen_combine_address
33 #include "common/gen_l3_config.h"
34 #include "common/gen_sample_positions.h"
35 #include "blorp/blorp_genX_exec.h"
38 blorp_emit_dwords(struct blorp_batch
*batch
, unsigned n
)
40 struct anv_cmd_buffer
*cmd_buffer
= batch
->driver_batch
;
41 return anv_batch_emit_dwords(&cmd_buffer
->batch
, n
);
45 blorp_emit_reloc(struct blorp_batch
*batch
,
46 void *location
, struct blorp_address address
, uint32_t delta
)
48 struct anv_cmd_buffer
*cmd_buffer
= batch
->driver_batch
;
49 assert(cmd_buffer
->batch
.start
<= location
&&
50 location
< cmd_buffer
->batch
.end
);
51 return anv_batch_emit_reloc(&cmd_buffer
->batch
, location
,
52 address
.buffer
, address
.offset
+ delta
);
56 blorp_surface_reloc(struct blorp_batch
*batch
, uint32_t ss_offset
,
57 struct blorp_address address
, uint32_t delta
)
59 struct anv_cmd_buffer
*cmd_buffer
= batch
->driver_batch
;
60 uint64_t address_u64
= 0;
62 anv_reloc_list_add(&cmd_buffer
->surface_relocs
, &cmd_buffer
->pool
->alloc
,
63 ss_offset
, address
.buffer
, address
.offset
+ delta
,
65 if (result
!= VK_SUCCESS
)
66 anv_batch_set_error(&cmd_buffer
->batch
, result
);
68 void *dest
= anv_block_pool_map(
69 &cmd_buffer
->device
->surface_state_pool
.block_pool
, ss_offset
);
70 write_reloc(cmd_buffer
->device
, dest
, address_u64
, false);
74 blorp_get_surface_address(struct blorp_batch
*blorp_batch
,
75 struct blorp_address address
)
77 /* We'll let blorp_surface_reloc write the address. */
#if GEN_GEN >= 7 && GEN_GEN < 10
/* BLORP hook (gen7-gen9 only): base address that surface-state offsets are
 * relative to, i.e. the BO backing the surface state pool.
 */
static struct blorp_address
blorp_get_surface_base_address(struct blorp_batch *batch)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
   return (struct blorp_address) {
      .buffer = cmd_buffer->device->surface_state_pool.block_pool.bo,
      .offset = 0,
   };
}
#endif
94 blorp_alloc_dynamic_state(struct blorp_batch
*batch
,
99 struct anv_cmd_buffer
*cmd_buffer
= batch
->driver_batch
;
101 struct anv_state state
=
102 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
, size
, alignment
);
104 *offset
= state
.offset
;
109 blorp_alloc_binding_table(struct blorp_batch
*batch
, unsigned num_entries
,
110 unsigned state_size
, unsigned state_alignment
,
112 uint32_t *surface_offsets
, void **surface_maps
)
114 struct anv_cmd_buffer
*cmd_buffer
= batch
->driver_batch
;
116 uint32_t state_offset
;
117 struct anv_state bt_state
;
120 anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer
, num_entries
,
121 &state_offset
, &bt_state
);
122 if (result
!= VK_SUCCESS
)
125 uint32_t *bt_map
= bt_state
.map
;
126 *bt_offset
= bt_state
.offset
;
128 for (unsigned i
= 0; i
< num_entries
; i
++) {
129 struct anv_state surface_state
=
130 anv_cmd_buffer_alloc_surface_state(cmd_buffer
);
131 bt_map
[i
] = surface_state
.offset
+ state_offset
;
132 surface_offsets
[i
] = surface_state
.offset
;
133 surface_maps
[i
] = surface_state
.map
;
138 blorp_alloc_vertex_buffer(struct blorp_batch
*batch
, uint32_t size
,
139 struct blorp_address
*addr
)
141 struct anv_cmd_buffer
*cmd_buffer
= batch
->driver_batch
;
143 /* From the Skylake PRM, 3DSTATE_VERTEX_BUFFERS:
145 * "The VF cache needs to be invalidated before binding and then using
146 * Vertex Buffers that overlap with any previously bound Vertex Buffer
147 * (at a 64B granularity) since the last invalidation. A VF cache
148 * invalidate is performed by setting the "VF Cache Invalidation Enable"
149 * bit in PIPE_CONTROL."
151 * This restriction first appears in the Skylake PRM but the internal docs
152 * also list it as being an issue on Broadwell. In order to avoid this
153 * problem, we align all vertex buffer allocations to 64 bytes.
155 struct anv_state vb_state
=
156 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
, size
, 64);
158 *addr
= (struct blorp_address
) {
159 .buffer
= cmd_buffer
->device
->dynamic_state_pool
.block_pool
.bo
,
160 .offset
= vb_state
.offset
,
161 .mocs
= cmd_buffer
->device
->isl_dev
.mocs
.internal
,
/* BLORP hook: invalidate the VF cache when a vertex buffer crosses a 4GB
 * (48-bit address high-bits) boundary relative to a previous binding.
 * Intentionally a no-op in anv — see the comment below.
 *
 * NOTE(review): the tail of the parameter list was missing from the mangled
 * source; `unsigned num_vbs` was reconstructed to match the upstream hook
 * signature — confirm against blorp_genX_exec.h.
 */
static void
blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch,
                                           const struct blorp_address *addrs,
                                           unsigned num_vbs)
{
   /* anv forces all vertex buffers into the low 4GB so there are never any
    * transitions that require a VF invalidation.
    */
}
178 static struct blorp_address
179 blorp_get_workaround_page(struct blorp_batch
*batch
)
181 struct anv_cmd_buffer
*cmd_buffer
= batch
->driver_batch
;
183 return (struct blorp_address
) {
184 .buffer
= cmd_buffer
->device
->workaround_bo
,
190 blorp_flush_range(struct blorp_batch
*batch
, void *start
, size_t size
)
192 /* We don't need to flush states anymore, since everything will be snooped.
197 blorp_emit_urb_config(struct blorp_batch
*batch
,
198 unsigned vs_entry_size
, unsigned sf_entry_size
)
200 struct anv_device
*device
= batch
->blorp
->driver_ctx
;
201 struct anv_cmd_buffer
*cmd_buffer
= batch
->driver_batch
;
203 assert(sf_entry_size
== 0);
205 const unsigned entry_size
[4] = { vs_entry_size
, 1, 1, 1 };
207 genX(emit_urb_setup
)(device
, &cmd_buffer
->batch
,
208 cmd_buffer
->state
.current_l3_config
,
209 VK_SHADER_STAGE_VERTEX_BIT
|
210 VK_SHADER_STAGE_FRAGMENT_BIT
,
215 genX(blorp_exec
)(struct blorp_batch
*batch
,
216 const struct blorp_params
*params
)
218 struct anv_cmd_buffer
*cmd_buffer
= batch
->driver_batch
;
220 if (!cmd_buffer
->state
.current_l3_config
) {
221 const struct gen_l3_config
*cfg
=
222 gen_get_default_l3_config(&cmd_buffer
->device
->info
);
223 genX(cmd_buffer_config_l3
)(cmd_buffer
, cfg
);
226 const unsigned scale
= params
->fast_clear_op
? UINT_MAX
: 1;
227 genX(cmd_buffer_emit_hashing_mode
)(cmd_buffer
, params
->x1
- params
->x0
,
228 params
->y1
- params
->y0
, scale
);
231 /* The PIPE_CONTROL command description says:
233 * "Whenever a Binding Table Index (BTI) used by a Render Taget Message
234 * points to a different RENDER_SURFACE_STATE, SW must issue a Render
235 * Target Cache Flush by enabling this bit. When render target flush
236 * is set due to new association of BTI, PS Scoreboard Stall bit must
237 * be set in this packet."
239 cmd_buffer
->state
.pending_pipe_bits
|=
240 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT
|
241 ANV_PIPE_STALL_AT_SCOREBOARD_BIT
;
245 /* The MI_LOAD/STORE_REGISTER_MEM commands which BLORP uses to implement
246 * indirect fast-clear colors can cause GPU hangs if we don't stall first.
247 * See genX(cmd_buffer_mi_memcpy) for more details.
249 if (params
->src
.clear_color_addr
.buffer
||
250 params
->dst
.clear_color_addr
.buffer
)
251 cmd_buffer
->state
.pending_pipe_bits
|= ANV_PIPE_CS_STALL_BIT
;
254 genX(cmd_buffer_apply_pipe_flushes
)(cmd_buffer
);
256 genX(flush_pipeline_select_3d
)(cmd_buffer
);
259 genX(cmd_buffer_aux_map_state
)(cmd_buffer
);
262 genX(cmd_buffer_emit_gen7_depth_flush
)(cmd_buffer
);
264 /* BLORP doesn't do anything fancy with depth such as discards, so we want
265 * the PMA fix off. Also, off is always the safe option.
267 genX(cmd_buffer_enable_pma_fix
)(cmd_buffer
, false);
269 blorp_exec(batch
, params
);
271 cmd_buffer
->state
.gfx
.vb_dirty
= ~0;
272 cmd_buffer
->state
.gfx
.dirty
= ~0;
273 cmd_buffer
->state
.push_constants_dirty
= ~0;