anv: Add a mi_memset and use it for zeroing queries
src/intel/vulkan/genX_gpu_memcpy.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "common/gen_l3_config.h"

/**
 * This file implements some lightweight memcpy/memset operations on the GPU
 * using a vertex buffer and streamout.
 */

/**
 * Returns the greatest common divisor of a and b that is a power of two.
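 * For example, gcd_pow2_u64(24, 16) == 8.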
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX, in which
    * case the MIN2() will take the other one. If both are 0 then we will
    * hit the assert above.
    */
   return 1ull << MIN2(a_log2, b_log2);
}

void
genX(cmd_buffer_mi_memcpy)(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_address dst, struct anv_address src,
                           uint32_t size)
{
   /* This memcpy operates in units of dwords. */
   assert(size % 4 == 0);
   assert(dst.offset % 4 == 0);
   assert(src.offset % 4 == 0);

#if GEN_GEN == 7
   /* On gen7, the combination of commands used here (MI_LOAD_REGISTER_MEM
    * and MI_STORE_REGISTER_MEM) can cause GPU hangs if any rendering is
    * in-flight when they are issued, even if the memory touched is not
    * currently active for rendering. The weird bit is that it is not the
    * MI_LOAD/STORE_REGISTER_MEM commands which hang but rather the in-flight
    * rendering, such that the next stalling command after the
    * MI_LOAD/STORE_REGISTER_MEM commands will catch the hang.
    *
    * It is unclear exactly why this hang occurs. Both MI commands come with
    * warnings about the 3D pipeline, but that doesn't seem to fully explain
    * it. My (Jason's) best theory is that it has something to do with the
    * fact that we're using a GPU state register as our temporary and that
    * something with reading/writing it is causing problems.
    *
    * In order to work around this issue, we emit a PIPE_CONTROL with the
    * command streamer stall bit set.
    */
   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
#endif

   for (uint32_t i = 0; i < size; i += 4) {
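      /* Each iteration moves a single dword: MI_COPY_MEM_MEM on BDW+, and a
       * load/store bounce through a register on IVB, both operate on one
       * dword at a time.
       */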
#if GEN_GEN >= 8
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_COPY_MEM_MEM), cp) {
         cp.DestinationMemoryAddress = anv_address_add(dst, i);
         cp.SourceMemoryAddress = anv_address_add(src, i);
      }
#else
      /* IVB does not have a general purpose register for command streamer
       * commands. Therefore, we use an alternate temporary register.
       */
#define TEMP_REG 0x2440 /* GEN7_3DPRIM_BASE_VERTEX */
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_MEM), load) {
         load.RegisterAddress = TEMP_REG;
         load.MemoryAddress = anv_address_add(src, i);
      }
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), store) {
         store.RegisterAddress = TEMP_REG;
         store.MemoryAddress = anv_address_add(dst, i);
      }
#undef TEMP_REG
#endif
   }
}

void
genX(cmd_buffer_mi_memset)(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_address dst, uint32_t value,
                           uint32_t size)
{
   /* This memset operates in units of dwords. */
   assert(size % 4 == 0);
   assert(dst.offset % 4 == 0);

   for (uint32_t i = 0; i < size; i += 4) {
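      /* MI_STORE_DATA_IMM, as emitted here, writes a single dword of
       * immediate data per packet.
       */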
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
         sdi.Address = anv_address_add(dst, i);
         sdi.ImmediateData = value;
      }
   }
}

void
genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_address dst, struct anv_address src,
                           uint32_t size)
{
   if (size == 0)
      return;

   assert(dst.offset + size <= dst.bo->size);
   assert(src.offset + size <= src.bo->size);

   /* The maximum copy block size is 4 32-bit components at a time. */
   unsigned bs = 16;
   bs = gcd_pow2_u64(bs, src.offset);
   bs = gcd_pow2_u64(bs, dst.offset);
   bs = gcd_pow2_u64(bs, size);
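   /* bs is now the largest power of two (at most 16 bytes) that divides the
    * source offset, the destination offset, and the size, so every vertex
    * fetch and streamout write stays naturally aligned.
    */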

   enum isl_format format;
   switch (bs) {
   case 4:  format = ISL_FORMAT_R32_UINT;          break;
   case 8:  format = ISL_FORMAT_R32G32_UINT;       break;
   case 16: format = ISL_FORMAT_R32G32B32A32_UINT; break;
   default:
      unreachable("Invalid size");
   }

   if (!cmd_buffer->state.current_l3_config) {
      const struct gen_l3_config *cfg =
         gen_get_default_l3_config(&cmd_buffer->device->info);
      genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
   }

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   uint32_t *dw;
   dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(3DSTATE_VERTEX_BUFFERS));
   GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, dw + 1,
      &(struct GENX(VERTEX_BUFFER_STATE)) {
         .VertexBufferIndex = 32, /* Reserved for this */
         .AddressModifyEnable = true,
         .BufferStartingAddress = src,
         .BufferPitch = bs,
#if (GEN_GEN >= 8)
         .MemoryObjectControlState = GENX(MOCS),
         .BufferSize = size,
#else
         .VertexBufferMemoryObjectControlState = GENX(MOCS),
         .EndAddress = anv_address_add(src, size - 1),
#endif
      });

   dw = anv_batch_emitn(&cmd_buffer->batch, 3, GENX(3DSTATE_VERTEX_ELEMENTS));
   GENX(VERTEX_ELEMENT_STATE_pack)(&cmd_buffer->batch, dw + 1,
      &(struct GENX(VERTEX_ELEMENT_STATE)) {
         .VertexBufferIndex = 32,
         .Valid = true,
         .SourceElementFormat = format,
         .SourceElementOffset = 0,
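         /* Fetch only as many 32-bit components as the block size holds and
          * pad the rest with zero.
          */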
         .Component0Control = (bs >= 4) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component1Control = (bs >= 8) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component2Control = (bs >= 12) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component3Control = (bs >= 16) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
      });

#if GEN_GEN >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_SGVS), sgvs);
#endif

   /* Disable all shader stages */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VS), vs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HS), hs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_TE), te);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DS), ds);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_GS), gs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_PS), ps);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SBE), sbe) {
      sbe.VertexURBEntryReadOffset = 1;
      sbe.NumberofSFOutputAttributes = 1;
      sbe.VertexURBEntryReadLength = 1;
#if GEN_GEN >= 8
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ForceVertexURBEntryReadOffset = true;
#endif

#if GEN_GEN >= 9
      for (unsigned i = 0; i < 32; i++)
         sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
#endif
   }

   /* Emit URB setup. We tell it that the VS is active because we want it to
    * allocate space for the VS. Even though one isn't run, we need VUEs to
    * store the data that VF is going to pass to SOL.
    */
   const unsigned entry_size[4] = { DIV_ROUND_UP(32, 64), 1, 1, 1 };

   genX(emit_urb_setup)(cmd_buffer->device, &cmd_buffer->batch,
                        cmd_buffer->state.current_l3_config,
                        VK_SHADER_STAGE_VERTEX_BIT, entry_size);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SO_BUFFER), sob) {
      sob.SOBufferIndex = 0;
      sob.SOBufferObjectControlState = GENX(MOCS);
      sob.SurfaceBaseAddress = dst;

#if GEN_GEN >= 8
      sob.SOBufferEnable = true;
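      /* As programmed here, SurfaceSize is in dwords, minus one. */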
      sob.SurfaceSize = size / 4 - 1;
#else
      sob.SurfacePitch = bs;
      sob.SurfaceEndAddress = anv_address_add(dst, size);
#endif

#if GEN_GEN >= 8
      /* As SOL writes out data, it updates the SO_WRITE_OFFSET registers with
       * the end position of the stream. We need to reset this value to 0 at
       * the beginning of the run or else SOL will start at the offset from
       * the previous draw.
       */
      sob.StreamOffsetWriteEnable = true;
      sob.StreamOffset = 0;
#endif
   }

#if GEN_GEN <= 7
   /* The hardware can do this for us on BDW+ (see above) */
   anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), load) {
      load.RegisterOffset = GENX(SO_WRITE_OFFSET0_num);
      load.DataDWord = 0;
   }
#endif

   dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(3DSTATE_SO_DECL_LIST),
                        .StreamtoBufferSelects0 = (1 << 0),
                        .NumEntries0 = 1);
   GENX(SO_DECL_ENTRY_pack)(&cmd_buffer->batch, dw + 3,
      &(struct GENX(SO_DECL_ENTRY)) {
         .Stream0Decl = {
            .OutputBufferSlot = 0,
            .RegisterIndex = 0,
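            /* Write the low bs/4 components, e.g. a mask of 0xf (XYZW)
             * when bs == 16.
             */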
            .ComponentMask = (1 << (bs / 4)) - 1,
         },
      });

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STREAMOUT), so) {
      so.SOFunctionEnable = true;
      so.RenderingDisable = true;
      so.Stream0VertexReadOffset = 0;
      so.Stream0VertexReadLength = DIV_ROUND_UP(32, 64);
#if GEN_GEN >= 8
      so.Buffer0SurfacePitch = bs;
#else
      so.SOBufferEnable0 = true;
#endif
   }

#if GEN_GEN >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
      topo.PrimitiveTopologyType = _3DPRIM_POINTLIST;
   }
#endif

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_STATISTICS), vf) {
      vf.StatisticsEnable = false;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType = SEQUENTIAL;
      prim.PrimitiveTopologyType = _3DPRIM_POINTLIST;
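      /* One point vertex per bs-byte block; streamout writes each one to
       * the destination buffer.
       */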
      prim.VertexCountPerInstance = size / bs;
      prim.StartVertexLocation = 0;
      prim.InstanceCount = 1;
      prim.StartInstanceLocation = 0;
      prim.BaseVertexLocation = 0;
   }

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
}