/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "common/gen_l3_config.h"
/**
 * This file implements some lightweight memcpy/memset operations on the GPU
 * using a vertex buffer and streamout.
 */
/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX, in which
    * case MIN2() will take the other one.  If both are 0 then we will hit
    * the assert above.
    */
   return 1 << MIN2(a_log2, b_log2);
}
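
/* A worked example of the helper above: gcd_pow2_u64(12, 8) == 4, since
 * ffsll(12) - 1 == 2 and ffsll(8) - 1 == 3, so the result is 1 << MIN2(2, 3).
 */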

void
genX(cmd_buffer_mi_memcpy)(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_address dst, struct anv_address src,
                           uint32_t size)
{
   /* This memcpy operates in units of dwords. */
   assert(size % 4 == 0);
   assert(dst.offset % 4 == 0);
   assert(src.offset % 4 == 0);

#if GEN_GEN == 7
   /* On gen7, the combination of commands used here (MI_LOAD_REGISTER_MEM
    * and MI_STORE_REGISTER_MEM) can cause GPU hangs if any rendering is
    * in-flight when they are issued, even if the memory touched is not
    * currently active for rendering.  The weird bit is that it is not the
    * MI_LOAD/STORE_REGISTER_MEM commands which hang but rather the in-flight
    * rendering hangs such that the next stalling command after the
    * MI_LOAD/STORE_REGISTER_MEM commands will catch the hang.
    *
    * It is unclear exactly why this hang occurs.  Both MI commands come with
    * warnings about the 3D pipeline but that doesn't seem to fully explain
    * it.  My (Jason's) best theory is that it has something to do with the
    * fact that we're using a GPU state register as our temporary and that
    * something with reading/writing it is causing problems.
    *
    * In order to work around this issue, we emit a PIPE_CONTROL with the
    * command streamer stall bit set.
    */
   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
#endif
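
   /* Each iteration below copies a single dword, emitting one or two MI
    * commands per dword, so this path presumably only makes sense for the
    * small copies it is used for; the streamout-based so_memcpy below is
    * the bulk path.
    */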
   for (uint32_t i = 0; i < size; i += 4) {
#if GEN_GEN >= 8
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_COPY_MEM_MEM), cp) {
         cp.DestinationMemoryAddress = anv_address_add(dst, i);
         cp.SourceMemoryAddress = anv_address_add(src, i);
      }
#else
      /* IVB does not have a general purpose register for command streamer
       * commands.  Therefore, we use an alternate temporary register.
       */
#define TEMP_REG 0x2440 /* GEN7_3DPRIM_BASE_VERTEX */
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_MEM), load) {
         load.RegisterAddress = TEMP_REG;
         load.MemoryAddress = anv_address_add(src, i);
      }
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), store) {
         store.RegisterAddress = TEMP_REG;
         store.MemoryAddress = anv_address_add(dst, i);
      }
#undef TEMP_REG
#endif
   }
}

void
genX(cmd_buffer_mi_memset)(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_address dst, uint32_t value,
                           uint32_t size)
{
   /* This memset operates in units of dwords. */
   assert(size % 4 == 0);
   assert(dst.offset % 4 == 0);

   for (uint32_t i = 0; i < size; i += 4) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
         sdi.Address = anv_address_add(dst, i);
         sdi.ImmediateData = value;
      }
   }
}
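
/* A hypothetical usage sketch (values made up for illustration): fill 64
 * bytes at some dword-aligned anv_address dst with the pattern 0xdeadbeef:
 *
 *    genX(cmd_buffer_mi_memset)(cmd_buffer, dst, 0xdeadbeef, 64);
 */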

void
genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_address dst, struct anv_address src,
                           uint32_t size)
{
   if (size == 0)
      return;

   assert(dst.offset + size <= dst.bo->size);
   assert(src.offset + size <= src.bo->size);

   /* The maximum copy block size is 4 32-bit components at a time. */
   unsigned bs = 16;
   bs = gcd_pow2_u64(bs, src.offset);
   bs = gcd_pow2_u64(bs, dst.offset);
   bs = gcd_pow2_u64(bs, size);

   enum isl_format format;
   switch (bs) {
   case 4:  format = ISL_FORMAT_R32_UINT;          break;
   case 8:  format = ISL_FORMAT_R32G32_UINT;       break;
   case 16: format = ISL_FORMAT_R32G32B32A32_UINT; break;
   default:
      unreachable("Invalid size");
   }
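
   /* For example (hypothetical values): with src.offset == 16,
    * dst.offset == 8, and size == 24, the gcd_pow2_u64() chain above reduces
    * bs from 16 to 8, so each "vertex" is an R32G32_UINT block of two dwords.
    */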

   if (!cmd_buffer->state.current_l3_config) {
      const struct gen_l3_config *cfg =
         gen_get_default_l3_config(&cmd_buffer->device->info);
      genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
   }

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   genX(flush_pipeline_select_3d)(cmd_buffer);
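
   /* The copy itself is a point-list draw: the VF reads the source buffer as
    * vertex data in the format picked above, every shader stage is disabled,
    * and streamout (SOL) writes the vertices straight to the destination
    * buffer.  The state emitted below wires that up.
    */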
   uint32_t *dw;
   dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(3DSTATE_VERTEX_BUFFERS));
   GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, dw + 1,
      &(struct GENX(VERTEX_BUFFER_STATE)) {
         .VertexBufferIndex = 32, /* Reserved for this */
         .AddressModifyEnable = true,
         .BufferStartingAddress = src,
         .BufferPitch = bs,
#if (GEN_GEN >= 8)
         .MemoryObjectControlState = GENX(MOCS),
         .BufferSize = size,
#else
         .VertexBufferMemoryObjectControlState = GENX(MOCS),
         .EndAddress = anv_address_add(src, size - 1),
#endif
      });
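
   /* Note the gen split above: gen8+ describes the vertex buffer by its size,
    * while gen7 uses an inclusive end address, hence the size - 1.
    */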

   dw = anv_batch_emitn(&cmd_buffer->batch, 3, GENX(3DSTATE_VERTEX_ELEMENTS));
   GENX(VERTEX_ELEMENT_STATE_pack)(&cmd_buffer->batch, dw + 1,
      &(struct GENX(VERTEX_ELEMENT_STATE)) {
         .VertexBufferIndex = 32,
         .Valid = true,
         .SourceElementFormat = format,
         .SourceElementOffset = 0,
         .Component0Control = (bs >= 4) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component1Control = (bs >= 8) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component2Control = (bs >= 12) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component3Control = (bs >= 16) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
      });
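
   /* The component controls above track bs: with bs == 8, for instance,
    * components 0 and 1 store source data while components 2 and 3 store
    * zero, matching the two dwords actually read per R32G32_UINT vertex.
    */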

#if GEN_GEN >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_SGVS), sgvs);
#endif

   /* Disable all shader stages. */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VS), vs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HS), hs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_TE), te);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DS), ds);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_GS), gs);
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_PS), ps);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SBE), sbe) {
      sbe.VertexURBEntryReadOffset = 1;
      sbe.NumberofSFOutputAttributes = 1;
      sbe.VertexURBEntryReadLength = 1;
#if GEN_GEN >= 8
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ForceVertexURBEntryReadOffset = true;
#endif

#if GEN_GEN >= 9
      for (unsigned i = 0; i < 32; i++)
         sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
#endif
   }

   /* Emit URB setup.  We tell it that the VS is active because we want it to
    * allocate space for the VS.  Even though one isn't run, we need VUEs to
    * store the data that VF is going to pass to SOL.
    */
   const unsigned entry_size[4] = { DIV_ROUND_UP(32, 64), 1, 1, 1 };

   genX(emit_urb_setup)(cmd_buffer->device, &cmd_buffer->batch,
                        cmd_buffer->state.current_l3_config,
                        VK_SHADER_STAGE_VERTEX_BIT, entry_size);
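
   /* Note on entry_size: DIV_ROUND_UP(32, 64) == 1, i.e. the VS gets the
    * minimum URB entry size of one allocation unit, which is enough for the
    * single attribute being passed through (the 64-byte unit size is an
    * assumption about emit_urb_setup's units, not stated here).
    */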

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SO_BUFFER), sob) {
      sob.SOBufferIndex = 0;
      sob.SOBufferObjectControlState = GENX(MOCS);
      sob.SurfaceBaseAddress = dst;

#if GEN_GEN >= 8
      sob.SOBufferEnable = true;
      sob.SurfaceSize = size / 4 - 1;
#else
      sob.SurfacePitch = bs;
      sob.SurfaceEndAddress = anv_address_add(dst, size);
#endif

#if GEN_GEN >= 8
      /* As SOL writes out data, it updates the SO_WRITE_OFFSET registers with
       * the end position of the stream.  We need to reset this value to 0 at
       * the beginning of the run or else SOL will start at the offset from
       * the previous draw.
       */
      sob.StreamOffsetWriteEnable = true;
      sob.StreamOffset = 0;
#endif
   }

#if GEN_GEN <= 7
   /* The hardware can do this for us on BDW+ (see above); on gen7 we have to
    * reset SO_WRITE_OFFSET0 to zero by hand.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), load) {
      load.RegisterOffset = GENX(SO_WRITE_OFFSET0_num);
      load.DataDWord = 0;
   }
#endif

   dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(3DSTATE_SO_DECL_LIST),
                        .StreamtoBufferSelects0 = (1 << 0),
                        .NumEntries0 = 1);
   GENX(SO_DECL_ENTRY_pack)(&cmd_buffer->batch, dw + 3,
      &(struct GENX(SO_DECL_ENTRY)) {
         .Stream = 0,
         .OutputBufferSlot = 0,
         .RegisterIndex = 0,
         .ComponentMask = (1 << (bs / 4)) - 1,
      });
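
   /* The ComponentMask above evaluates to 0x1, 0x3, or 0xf for bs of 4, 8,
    * or 16, so SOL writes out exactly the components the VF actually loaded.
    */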

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STREAMOUT), so) {
      so.SOFunctionEnable = true;
      so.RenderingDisable = true;
      so.Stream0VertexReadOffset = 0;
      so.Stream0VertexReadLength = DIV_ROUND_UP(32, 64);
#if GEN_GEN >= 8
      so.Buffer0SurfacePitch = bs;
#else
      so.SOBufferEnable0 = true;
#endif
   }

#if GEN_GEN >= 8
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
      topo.PrimitiveTopologyType = _3DPRIM_POINTLIST;
   }
#endif

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF_STATISTICS), vf) {
      vf.StatisticsEnable = false;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = _3DPRIM_POINTLIST;
      prim.VertexCountPerInstance   = size / bs;
      prim.StartVertexLocation      = 0;
      prim.InstanceCount            = 1;
      prim.StartInstanceLocation    = 0;
      prim.BaseVertexLocation       = 0;
   }
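
   /* Each "vertex" covers one bs-byte block, so size / bs points walk the
    * whole range; the gcd math above guarantees the division is exact.
    */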
   /* We just clobbered the 3D pipeline state behind the driver's back, so
    * mark the pipeline dirty to get it re-emitted before the next real draw.
    */
   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
}