gallium/radeon: rename r600_context_bo_reloc -> radeon_add_to_buffer_list
[mesa.git] / src / gallium / drivers / radeonsi / si_cp_dma.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "si_pipe.h"
#include "sid.h"
#include "radeon/r600_cs.h"

/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define R600_CP_DMA_SYNC	(1 << 0) /* R600+ */

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define SI_CP_DMA_RAW_WAIT	(1 << 1) /* SI+ */

/* Set this to route the transfer through the TC L2 cache; the packet then
 * selects the *_TC_L2 address spaces. Only available on CIK+. */
#define CIK_CP_DMA_USE_L2	(1 << 2)
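
/* Typical flag usage (as done by the loops below): every packet of a large
 * clear/copy gets CIK_CP_DMA_USE_L2 on CIK+, the first packet additionally
 * gets SI_CP_DMA_RAW_WAIT so it waits for earlier CP DMA packets, and only
 * the last packet gets R600_CP_DMA_SYNC so the 3D engine waits for completion. */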

/* Emit a CP DMA packet to do a copy from one buffer to another.
 * The size must fit in bits [20:0].
 */
static void si_emit_cp_dma_copy_buffer(struct si_context *sctx,
					uint64_t dst_va, uint64_t src_va,
					unsigned size, unsigned flags)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
	uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
	uint32_t sel = flags & CIK_CP_DMA_USE_L2 ?
			   S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2) |
			   S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;

	assert(size);
	assert((size & ((1<<21)-1)) == size);

	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, sync_flag | sel);	/* CP_SYNC [31] */
		radeon_emit(cs, src_va);		/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);		/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, size | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	} else {
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_va);			/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, size | raw_wait);		/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	}
}

/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
static void si_emit_cp_dma_clear_buffer(struct si_context *sctx,
					uint64_t dst_va, unsigned size,
					uint32_t clear_value, unsigned flags)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? S_411_CP_SYNC(1) : 0;
	uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? S_414_RAW_WAIT(1) : 0;
	uint32_t dst_sel = flags & CIK_CP_DMA_USE_L2 ? S_411_DSL_SEL(V_411_DST_ADDR_TC_L2) : 0;

	assert(size);
	assert((size & ((1<<21)-1)) == size);

	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, sync_flag | dst_sel | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
		radeon_emit(cs, clear_value);		/* DATA [31:0] */
		radeon_emit(cs, 0);			/* SRC_ADDR_HI, unused for a DATA source */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, size | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	} else {
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, clear_value);		/* DATA [31:0] */
		radeon_emit(cs, sync_flag | S_411_SRC_SEL(V_411_DATA)); /* CP_SYNC [31] | SRC_SEL[30:29] */
		radeon_emit(cs, dst_va);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff);	/* DST_ADDR_HI [15:0] */
		radeon_emit(cs, size | raw_wait);		/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	}
}

/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
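/* Larger clears and copies are split into multiple packets of at most this
 * size by the loops below. The limit is presumably kept a multiple of 8
 * (rather than the full (1 << 21) - 1) so that splitting an aligned transfer
 * leaves every intermediate chunk aligned as well. */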

static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
			    unsigned offset, unsigned size, unsigned value,
			    bool is_framebuffer)
{
	struct si_context *sctx = (struct si_context*)ctx;
	unsigned flush_flags, tc_l2_flag;

	if (!size)
		return;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
		       offset + size);

	/* Fallback for unaligned clears: write the dwords with the CPU. */
	if (offset % 4 != 0 || size % 4 != 0) {
		uint8_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
						      sctx->b.rings.gfx.cs,
						      PIPE_TRANSFER_WRITE);
		map += offset;
		size /= 4;
		for (unsigned i = 0; i < size; i++)
			*(uint32_t*)(map + i*4) = value;
		return;
	}

	uint64_t va = r600_resource(dst)->gpu_address + offset;

	/* Flush the caches where the resource is bound. */
	if (is_framebuffer) {
		flush_flags = SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER;
		tc_l2_flag = 0;
	} else {
		flush_flags = SI_CONTEXT_INV_TC_L1 |
			      (sctx->b.chip_class == SI ? SI_CONTEXT_INV_TC_L2 : 0) |
			      SI_CONTEXT_INV_KCACHE;
		tc_l2_flag = sctx->b.chip_class == SI ? 0 : CIK_CP_DMA_USE_L2;
	}
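	/* Note: on SI, CP DMA can't go through TC L2, which is why TC L2 is
	 * invalidated above and the L2 path (CIK_CP_DMA_USE_L2) is only used
	 * on CIK+. */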

	sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
			 flush_flags;

	while (size) {
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned dma_flags = tc_l2_flag;

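		/* Reserve space for the packet (7 dwords covers the largest
		 * form emitted here: DMA_DATA is 1 header + 6 body dwords)
		 * plus a possible cache flush. */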
		si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0),
				 FALSE);

		/* This must be done after need_cs_space. */
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
					  (struct r600_resource*)dst, RADEON_USAGE_WRITE,
					  RADEON_PRIO_MIN);

		/* Flush the caches for the first copy only.
		 * Also wait for the previous CP DMA operations. */
		if (sctx->b.flags) {
			si_emit_cache_flush(sctx, NULL);
			dma_flags |= SI_CP_DMA_RAW_WAIT; /* same as WAIT_UNTIL=CP_DMA_IDLE */
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count)
			dma_flags |= R600_CP_DMA_SYNC;

		/* Emit the clear packet. */
		si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value, dma_flags);

		size -= byte_count;
		va += byte_count;
	}

	/* Flush the caches again in case the 3D engine has been prefetching
	 * the resource. */
	sctx->b.flags |= flush_flags;

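	/* The new contents may only reside in TC L2; remember that, so that
	 * users which bypass TC L2 (e.g. index buffer fetches) can write it
	 * back before reading the buffer. */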
	if (tc_l2_flag)
		r600_resource(dst)->TC_L2_dirty = true;
}

void si_copy_buffer(struct si_context *sctx,
		    struct pipe_resource *dst, struct pipe_resource *src,
		    uint64_t dst_offset, uint64_t src_offset, unsigned size,
		    bool is_framebuffer)
{
	unsigned flush_flags, tc_l2_flag;

	if (!size)
		return;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += r600_resource(dst)->gpu_address;
	src_offset += r600_resource(src)->gpu_address;

	/* Flush the caches where the resource is bound. */
	if (is_framebuffer) {
		flush_flags = SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER;
		tc_l2_flag = 0;
	} else {
		flush_flags = SI_CONTEXT_INV_TC_L1 |
			      (sctx->b.chip_class == SI ? SI_CONTEXT_INV_TC_L2 : 0) |
			      SI_CONTEXT_INV_KCACHE;
		tc_l2_flag = sctx->b.chip_class == SI ? 0 : CIK_CP_DMA_USE_L2;
	}

	sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
			 flush_flags;

	while (size) {
		unsigned sync_flags = tc_l2_flag;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);

		si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0), FALSE);

		/* Flush the caches for the first copy only. Also wait for old CP DMA packets to complete. */
		if (sctx->b.flags) {
			si_emit_cache_flush(sctx, NULL);
			sync_flags |= SI_CP_DMA_RAW_WAIT;
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count) {
			sync_flags |= R600_CP_DMA_SYNC;
		}

		/* This must be done after si_need_cs_space. */
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)src,
					  RADEON_USAGE_READ, RADEON_PRIO_MIN);
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst,
					  RADEON_USAGE_WRITE, RADEON_PRIO_MIN);

		si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset, byte_count, sync_flags);

		size -= byte_count;
		src_offset += byte_count;
		dst_offset += byte_count;
	}

	/* Flush the caches again in case the 3D engine has been prefetching
	 * the resource. */
	sctx->b.flags |= flush_flags;

	if (tc_l2_flag)
		r600_resource(dst)->TC_L2_dirty = true;
}

void si_init_cp_dma_functions(struct si_context *sctx)
{
	sctx->b.clear_buffer = si_clear_buffer;
}
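
/*
 * Illustrative sketch, not part of the driver: callers elsewhere in radeonsi
 * are expected to use these entry points roughly like this (sctx, dst and src
 * come from the caller):
 *
 *	// Fill the first 256 bytes of dst with zeros. dst is not currently
 *	// bound as a framebuffer, so the TC L2 path may be used on CIK+.
 *	sctx->b.clear_buffer(&sctx->b.b, dst, 0, 256, 0, false);
 *
 *	// Copy the whole source buffer into dst at offset 0.
 *	si_copy_buffer(sctx, dst, src, 0, 0, src->width0, false);
 */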