src/gallium/drivers/radeonsi/si_cp_dma.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "si_pipe.h"
#include "sid.h"
#include "radeon/r600_cs.h"


/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define R600_CP_DMA_SYNC        (1 << 0) /* R600+ */

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define SI_CP_DMA_RAW_WAIT      (1 << 1) /* SI+ */
#define CIK_CP_DMA_USE_L2       (1 << 2)
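
/* Typical sequencing of these flags when a transfer is split into several
 * packets (this is what si_clear_buffer and si_copy_buffer below do):
 *
 *  - flush/invalidate the relevant caches once before the first packet and
 *    set SI_CP_DMA_RAW_WAIT on that packet, so it waits for older CP DMA
 *    work to finish;
 *  - set R600_CP_DMA_SYNC only on the last packet, so the 3D engine waits
 *    until all data has reached memory;
 *  - set CIK_CP_DMA_USE_L2 on CIK and later chips to route the transfer
 *    through the TC L2 cache instead of going straight to memory.
 */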

/* Emit a CP DMA packet to do a copy from one buffer to another.
 * The size must fit in bits [20:0].
 */
static void si_emit_cp_dma_copy_buffer(struct si_context *sctx,
                                       uint64_t dst_va, uint64_t src_va,
                                       unsigned size, unsigned flags)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
        uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
        uint32_t sel = flags & CIK_CP_DMA_USE_L2 ?
                       S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2) |
                       S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;

        assert(size);
        assert((size & ((1<<21)-1)) == size);

        if (sctx->b.chip_class >= CIK) {
                radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
                radeon_emit(cs, sync_flag | sel);       /* CP_SYNC [31] */
                radeon_emit(cs, src_va);                /* SRC_ADDR_LO [31:0] */
                radeon_emit(cs, src_va >> 32);          /* SRC_ADDR_HI [31:0] */
                radeon_emit(cs, dst_va);                /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, dst_va >> 32);          /* DST_ADDR_HI [31:0] */
                radeon_emit(cs, size | raw_wait);       /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        } else {
                radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
                radeon_emit(cs, src_va);                        /* SRC_ADDR_LO [31:0] */
                radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
                radeon_emit(cs, dst_va);                        /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, (dst_va >> 32) & 0xffff);       /* DST_ADDR_HI [15:0] */
                radeon_emit(cs, size | raw_wait);               /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        }
}

/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
static void si_emit_cp_dma_clear_buffer(struct si_context *sctx,
                                        uint64_t dst_va, unsigned size,
                                        uint32_t clear_value, unsigned flags)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
        uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
        uint32_t dst_sel = flags & CIK_CP_DMA_USE_L2 ? S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;

        assert(size);
        assert((size & ((1<<21)-1)) == size);

        if (sctx->b.chip_class >= CIK) {
                radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
                radeon_emit(cs, sync_flag | dst_sel | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
                radeon_emit(cs, clear_value);           /* DATA [31:0] */
                radeon_emit(cs, 0);                     /* unused */
                radeon_emit(cs, dst_va);                /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, dst_va >> 32);          /* DST_ADDR_HI [31:0] */
                radeon_emit(cs, size | raw_wait);       /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        } else {
                radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
                radeon_emit(cs, clear_value);           /* DATA [31:0] */
                radeon_emit(cs, sync_flag | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
                radeon_emit(cs, dst_va);                /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
                radeon_emit(cs, size | raw_wait);       /* COMMAND [29:22] | BYTE_COUNT [20:0] */
        }
}

/* The max number of bytes to copy per packet. The BYTE_COUNT field is
 * 21 bits wide; the value stays 8 below the field maximum so that
 * full-size chunks remain 8-byte aligned. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)

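/* Clear "size" bytes of "dst" starting at "offset" with a 32-bit "value",
 * using CP DMA packets chunked by CP_DMA_MAX_BYTE_COUNT. "is_framebuffer"
 * selects which caches are flushed around the operation. Unaligned offsets
 * or sizes fall back to a CPU mapping of the buffer. This function is
 * installed as the context's clear_buffer callback in
 * si_init_cp_dma_functions() at the end of this file.
 */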
static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
                            unsigned offset, unsigned size, unsigned value,
                            bool is_framebuffer)
{
        struct si_context *sctx = (struct si_context*)ctx;
        unsigned flush_flags, tc_l2_flag;

        if (!size)
                return;

        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
        util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
                       offset + size);

        /* Fallback for unaligned clears: store the dwords through a CPU
         * mapping of the buffer, starting at the requested byte offset. */
        if (offset % 4 != 0 || size % 4 != 0) {
                uint32_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
                                                       sctx->b.rings.gfx.cs,
                                                       PIPE_TRANSFER_WRITE);

                map = (uint32_t*)((char*)map + offset);
                size /= 4;
                for (unsigned i = 0; i < size; i++)
                        *map++ = value;
                return;
        }

        uint64_t va = r600_resource(dst)->gpu_address + offset;

        /* Flush the caches where the resource is bound. */
        if (is_framebuffer) {
                flush_flags = SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER;
                tc_l2_flag = 0;
        } else {
                flush_flags = SI_CONTEXT_INV_TC_L1 |
                              (sctx->b.chip_class == SI ? SI_CONTEXT_INV_TC_L2 : 0) |
                              SI_CONTEXT_INV_KCACHE;
                tc_l2_flag = sctx->b.chip_class == SI ? 0 : CIK_CP_DMA_USE_L2;
        }

        sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                         flush_flags;

        while (size) {
                unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
                unsigned dma_flags = tc_l2_flag;

                si_need_cs_space(sctx);

                /* This must be done after si_need_cs_space. */
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                          (struct r600_resource*)dst, RADEON_USAGE_WRITE,
                                          RADEON_PRIO_MIN);

                /* Flush the caches for the first copy only.
                 * Also wait for the previous CP DMA operations. */
                if (sctx->b.flags) {
                        si_emit_cache_flush(sctx, NULL);
                        dma_flags |= SI_CP_DMA_RAW_WAIT; /* same as WAIT_UNTIL=CP_DMA_IDLE */
                }

                /* Do the synchronization after the last copy, so that all data is written to memory. */
                if (size == byte_count)
                        dma_flags |= R600_CP_DMA_SYNC;

                /* Emit the clear packet. */
                si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value, dma_flags);

                size -= byte_count;
                va += byte_count;
        }

        /* Flush the caches again in case the 3D engine has been prefetching
         * the resource. */
        sctx->b.flags |= flush_flags;

        if (tc_l2_flag)
                r600_resource(dst)->TC_L2_dirty = true;
}

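/* Copy "size" bytes from "src" + "src_offset" to "dst" + "dst_offset" using
 * CP DMA on the GFX ring, chunked by CP_DMA_MAX_BYTE_COUNT and with the same
 * cache handling as si_clear_buffer above.
 *
 * Hypothetical usage sketch (not taken from this file): copying an entire
 * buffer to the start of another one would look like
 *
 *    si_copy_buffer(sctx, dst, src, 0, 0, src->width0, false);
 */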
void si_copy_buffer(struct si_context *sctx,
                    struct pipe_resource *dst, struct pipe_resource *src,
                    uint64_t dst_offset, uint64_t src_offset, unsigned size,
                    bool is_framebuffer)
{
        unsigned flush_flags, tc_l2_flag;

        if (!size)
                return;

        /* Mark the buffer range of destination as valid (initialized),
         * so that transfer_map knows it should wait for the GPU when mapping
         * that range. */
        util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
                       dst_offset + size);

        dst_offset += r600_resource(dst)->gpu_address;
        src_offset += r600_resource(src)->gpu_address;

        /* Flush the caches where the resource is bound. */
        if (is_framebuffer) {
                flush_flags = SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER;
                tc_l2_flag = 0;
        } else {
                flush_flags = SI_CONTEXT_INV_TC_L1 |
                              (sctx->b.chip_class == SI ? SI_CONTEXT_INV_TC_L2 : 0) |
                              SI_CONTEXT_INV_KCACHE;
                tc_l2_flag = sctx->b.chip_class == SI ? 0 : CIK_CP_DMA_USE_L2;
        }

        sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                         flush_flags;

        while (size) {
                unsigned sync_flags = tc_l2_flag;
                unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);

                si_need_cs_space(sctx);

                /* Flush the caches for the first copy only. Also wait for old CP DMA packets to complete. */
                if (sctx->b.flags) {
                        si_emit_cache_flush(sctx, NULL);
                        sync_flags |= SI_CP_DMA_RAW_WAIT;
                }

                /* Do the synchronization after the last copy, so that all data is written to memory. */
                if (size == byte_count) {
                        sync_flags |= R600_CP_DMA_SYNC;
                }

                /* This must be done after si_need_cs_space. */
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)src,
                                          RADEON_USAGE_READ, RADEON_PRIO_MIN);
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst,
                                          RADEON_USAGE_WRITE, RADEON_PRIO_MIN);

                si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset, byte_count, sync_flags);

                size -= byte_count;
                src_offset += byte_count;
                dst_offset += byte_count;
        }

        /* Flush the caches again in case the 3D engine has been prefetching
         * the resource. */
        sctx->b.flags |= flush_flags;

        if (tc_l2_flag)
                r600_resource(dst)->TC_L2_dirty = true;
}

void si_init_cp_dma_functions(struct si_context *sctx)
{
        sctx->b.clear_buffer = si_clear_buffer;
}