r600g,radeonsi: consolidate transfer, cmask, and fmask structures
[mesa.git] / src/gallium/drivers/r600/r600_hw_context.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"
#include <errno.h>
#include <unistd.h>

/* Get the backend mask. */
void r600_get_backend_mask(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
        struct r600_resource *buffer;
        uint32_t *results;
        unsigned num_backends = ctx->screen->b.info.r600_num_backends;
        unsigned i, mask = 0;
        uint64_t va;

        /* if the backend_map query is supported by the kernel */
        if (ctx->screen->b.info.r600_backend_map_valid) {
                unsigned num_tile_pipes = ctx->screen->b.info.r600_num_tile_pipes;
                unsigned backend_map = ctx->screen->b.info.r600_backend_map;
                unsigned item_width, item_mask;

                if (ctx->b.chip_class >= EVERGREEN) {
                        item_width = 4;
                        item_mask = 0x7;
                } else {
                        item_width = 2;
                        item_mask = 0x3;
                }

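                /* backend_map packs one backend ID per tile pipe into
                 * consecutive item_width-bit fields; walk the map and set a
                 * mask bit for every backend that appears. */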
                while (num_tile_pipes--) {
                        i = backend_map & item_mask;
                        mask |= (1 << i);
                        backend_map >>= item_width;
                }
                if (mask != 0) {
                        ctx->backend_mask = mask;
                        return;
                }
        }

        /* otherwise, fall back to a path that works on older kernels */

        /* create a buffer for the event data */
        buffer = (struct r600_resource*)
                pipe_buffer_create(&ctx->screen->b.b, PIPE_BIND_CUSTOM,
                                   PIPE_USAGE_STAGING, ctx->max_db*16);
        if (!buffer)
                goto err;
        va = r600_resource_va(&ctx->screen->b.b, (void*)buffer);

        /* initialize buffer with zeroes */
        results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
        if (results) {
                memset(results, 0, ctx->max_db * 4 * 4);
                ctx->b.ws->buffer_unmap(buffer->cs_buf);
                /* emit EVENT_WRITE for ZPASS_DONE */
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
                cs->buf[cs->cdw++] = va;
                cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;

                cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, buffer, RADEON_USAGE_WRITE);

                /* analyze the results */
                results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
                if (results) {
                        for (i = 0; i < ctx->max_db; i++) {
                                /* at least the highest bit will be set if the backend is used */
                                if (results[i*4 + 1])
                                        mask |= (1 << i);
                        }
                        ctx->b.ws->buffer_unmap(buffer->cs_buf);
                }
        }

        pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

        if (mask != 0) {
                ctx->backend_mask = mask;
                return;
        }

err:
        /* fall back to the old method: set the num_backends lowest bits to 1 */
        ctx->backend_mask = (~((uint32_t)0)) >> (32 - num_backends);
        return;
}

void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
                        boolean count_draw_in)
{
        if (!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
                ctx->b.gtt = 0;
                ctx->b.vram = 0;
                ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
                return;
        }
        /* all memory usage will be accounted for again once the relocations are emitted */
        ctx->b.gtt = 0;
        ctx->b.vram = 0;

        /* The number of dwords we already used in the CS so far. */
        num_dw += ctx->b.rings.gfx.cs->cdw;

        if (count_draw_in) {
                unsigned i;

                /* The number of dwords all the dirty states would take. */
                for (i = 0; i < R600_NUM_ATOMS; i++) {
                        if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
                                num_dw += ctx->atoms[i]->num_dw;
                                if (ctx->screen->trace_bo) {
                                        num_dw += R600_TRACE_CS_DWORDS;
                                }
                        }
                }

                /* The upper bound of how much space a draw command would take. */
                num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
                if (ctx->screen->trace_bo) {
                        num_dw += R600_TRACE_CS_DWORDS;
                }
        }

        /* Count in queries_suspend. */
        num_dw += ctx->num_cs_dw_nontimer_queries_suspend;

        /* Count in streamout_end at the end of CS. */
        if (ctx->b.streamout.begin_emitted) {
                num_dw += ctx->b.streamout.num_dw_for_end;
        }

        /* Count in render_condition(NULL) at the end of CS. */
        if (ctx->predicate_drawing) {
                num_dw += 3;
        }

        /* SX_MISC */
        if (ctx->b.chip_class <= R700) {
                num_dw += 3;
        }

        /* Count in framebuffer cache flushes at the end of CS. */
        num_dw += R600_MAX_FLUSH_CS_DWORDS;

        /* The fence at the end of CS. */
        num_dw += 10;
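        /* (This matches the r600_need_cs_space(ctx, 10, FALSE) reservation
         * that r600_context_emit_fence() makes for itself.) */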

        /* Flush if there's not enough space. */
        if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
                ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
        }
}

void r600_flush_emit(struct r600_context *rctx)
{
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
        unsigned cp_coher_cntl = 0;
        unsigned wait_until = 0;

        if (!rctx->b.flags) {
                return;
        }

        if (rctx->b.flags & R600_CONTEXT_WAIT_3D_IDLE) {
                wait_until |= S_008040_WAIT_3D_IDLE(1);
        }
        if (rctx->b.flags & R600_CONTEXT_WAIT_CP_DMA_IDLE) {
                wait_until |= S_008040_WAIT_CP_DMA_IDLE(1);
        }

        if (wait_until) {
                /* Use of WAIT_UNTIL is deprecated on Cayman+ */
                if (rctx->b.family >= CHIP_CAYMAN) {
                        /* emit a PS partial flush on Cayman/TN instead */
                        rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
                }
        }

        if (rctx->b.flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
        }

        if (rctx->b.chip_class >= R700 &&
            (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) {
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0);
        }

        if (rctx->b.chip_class >= R700 &&
            (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) {
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0);

                /* Set FULL_CACHE_ENA for DB META flushes on r7xx and later.
                 *
                 * This hack predates the use of FLUSH_AND_INV_DB_META, so it's
                 * unclear whether it's still needed or even whether it has
                 * any effect.
                 */
                cp_coher_cntl |= S_0085F0_FULL_CACHE_ENA(1);
        }

        if (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV) {
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
        }

        if (rctx->b.flags & R600_CONTEXT_INV_CONST_CACHE) {
                /* Direct constant addressing uses the shader cache.
                 * Indirect constant addressing uses the vertex cache. */
                cp_coher_cntl |= S_0085F0_SH_ACTION_ENA(1) |
                                 (rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
                                                         : S_0085F0_TC_ACTION_ENA(1));
        }
        if (rctx->b.flags & R600_CONTEXT_INV_VERTEX_CACHE) {
                cp_coher_cntl |= rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
                                                        : S_0085F0_TC_ACTION_ENA(1);
        }
        if (rctx->b.flags & R600_CONTEXT_INV_TEX_CACHE) {
                /* Textures use the texture cache.
                 * Texture buffer objects use the vertex cache. */
                cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
                                 (rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1) : 0);
        }

        /* Don't use the DB CP COHER logic on r6xx.
         * There are hw bugs.
         */
        if (rctx->b.chip_class >= R700 &&
            (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB)) {
                cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
                                 S_0085F0_DB_DEST_BASE_ENA(1) |
                                 S_0085F0_SMX_ACTION_ENA(1);
        }

        /* Don't use the CB CP COHER logic on r6xx.
         * There are hw bugs.
         */
        if (rctx->b.chip_class >= R700 &&
            (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB)) {
                cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
                                 S_0085F0_CB0_DEST_BASE_ENA(1) |
                                 S_0085F0_CB1_DEST_BASE_ENA(1) |
                                 S_0085F0_CB2_DEST_BASE_ENA(1) |
                                 S_0085F0_CB3_DEST_BASE_ENA(1) |
                                 S_0085F0_CB4_DEST_BASE_ENA(1) |
                                 S_0085F0_CB5_DEST_BASE_ENA(1) |
                                 S_0085F0_CB6_DEST_BASE_ENA(1) |
                                 S_0085F0_CB7_DEST_BASE_ENA(1) |
                                 S_0085F0_SMX_ACTION_ENA(1);
                if (rctx->b.chip_class >= EVERGREEN)
                        cp_coher_cntl |= S_0085F0_CB8_DEST_BASE_ENA(1) |
                                         S_0085F0_CB9_DEST_BASE_ENA(1) |
                                         S_0085F0_CB10_DEST_BASE_ENA(1) |
                                         S_0085F0_CB11_DEST_BASE_ENA(1);
        }

        if (rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH) {
                cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) |
                                 S_0085F0_SO1_DEST_BASE_ENA(1) |
                                 S_0085F0_SO2_DEST_BASE_ENA(1) |
                                 S_0085F0_SO3_DEST_BASE_ENA(1) |
                                 S_0085F0_SMX_ACTION_ENA(1);
        }

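        /* SURFACE_SYNC flushes and invalidates all the caches selected in
         * cp_coher_cntl with a single packet; a COHER_SIZE of 0xffffffff
         * makes it cover the whole address range. */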
        if (cp_coher_cntl) {
                cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
                cs->buf[cs->cdw++] = cp_coher_cntl;   /* CP_COHER_CNTL */
                cs->buf[cs->cdw++] = 0xffffffff;      /* CP_COHER_SIZE */
                cs->buf[cs->cdw++] = 0;               /* CP_COHER_BASE */
                cs->buf[cs->cdw++] = 0x0000000A;      /* POLL_INTERVAL */
        }

        if (wait_until) {
                /* Use of WAIT_UNTIL is deprecated on Cayman+ */
                if (rctx->b.family < CHIP_CAYMAN) {
                        /* wait for things to settle */
                        r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
                }
        }

        /* everything is properly flushed */
        rctx->b.flags = 0;
}

void r600_context_flush(struct r600_context *ctx, unsigned flags)
{
        struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;

        ctx->nontimer_queries_suspended = false;
        ctx->b.streamout.suspended = false;

        /* suspend queries */
        if (ctx->num_cs_dw_nontimer_queries_suspend) {
                r600_suspend_nontimer_queries(ctx);
                ctx->nontimer_queries_suspended = true;
        }

        if (ctx->b.streamout.begin_emitted) {
                r600_emit_streamout_end(&ctx->b);
                ctx->b.streamout.suspended = true;
        }

        /* A flush is needed to avoid lockups on some chips with user fences;
         * this also flushes the framebuffer cache.
         */
        ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV |
                        R600_CONTEXT_FLUSH_AND_INV_CB |
                        R600_CONTEXT_FLUSH_AND_INV_DB |
                        R600_CONTEXT_FLUSH_AND_INV_CB_META |
                        R600_CONTEXT_FLUSH_AND_INV_DB_META |
                        R600_CONTEXT_WAIT_3D_IDLE |
                        R600_CONTEXT_WAIT_CP_DMA_IDLE;

        r600_flush_emit(ctx);

        /* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
        if (ctx->b.chip_class <= R700) {
                r600_write_context_reg(cs, R_028350_SX_MISC, 0);
        }

        /* force the winsys to keep the tiling flags */
        if (ctx->keep_tiling_flags) {
                flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
        }

        /* Flush the CS. */
        ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, ctx->screen->cs_count++);
}

void r600_begin_new_cs(struct r600_context *ctx)
{
        unsigned shader;

        ctx->b.flags = 0;
        ctx->b.gtt = 0;
        ctx->b.vram = 0;

        /* Begin a new CS. */
        r600_emit_command_buffer(ctx->b.rings.gfx.cs, &ctx->start_cs_cmd);

        /* Re-emit states. */
        ctx->alphatest_state.atom.dirty = true;
        ctx->blend_color.atom.dirty = true;
        ctx->cb_misc_state.atom.dirty = true;
        ctx->clip_misc_state.atom.dirty = true;
        ctx->clip_state.atom.dirty = true;
        ctx->db_misc_state.atom.dirty = true;
        ctx->db_state.atom.dirty = true;
        ctx->framebuffer.atom.dirty = true;
        ctx->pixel_shader.atom.dirty = true;
        ctx->poly_offset_state.atom.dirty = true;
        ctx->vgt_state.atom.dirty = true;
        ctx->sample_mask.atom.dirty = true;
        ctx->scissor.atom.dirty = true;
        ctx->config_state.atom.dirty = true;
        ctx->stencil_ref.atom.dirty = true;
        ctx->vertex_fetch_shader.atom.dirty = true;
        ctx->vertex_shader.atom.dirty = true;
        ctx->viewport.atom.dirty = true;

        if (ctx->blend_state.cso)
                ctx->blend_state.atom.dirty = true;
        if (ctx->dsa_state.cso)
                ctx->dsa_state.atom.dirty = true;
        if (ctx->rasterizer_state.cso)
                ctx->rasterizer_state.atom.dirty = true;

        if (ctx->b.chip_class <= R700) {
                ctx->seamless_cube_map.atom.dirty = true;
        }

        ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
        r600_vertex_buffers_dirty(ctx);

        /* Re-emit shader resources. */
        for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
                struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
                struct r600_textures_info *samplers = &ctx->samplers[shader];

                constbuf->dirty_mask = constbuf->enabled_mask;
                samplers->views.dirty_mask = samplers->views.enabled_mask;
                samplers->states.dirty_mask = samplers->states.enabled_mask;

                r600_constant_buffers_dirty(ctx, constbuf);
                r600_sampler_views_dirty(ctx, &samplers->views);
                r600_sampler_states_dirty(ctx, &samplers->states);
        }

        if (ctx->b.streamout.suspended) {
                ctx->b.streamout.append_bitmask = ctx->b.streamout.enabled_mask;
                r600_streamout_buffers_dirty(&ctx->b);
        }

        /* resume queries */
        if (ctx->nontimer_queries_suspended) {
                r600_resume_nontimer_queries(ctx);
        }

        /* Re-emit the draw state. */
        ctx->last_primitive_type = -1;
        ctx->last_start_instance = -1;

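        /* Record how many dwords the start-of-CS state takes, so a later
         * flush can detect a CS that contains no other work. */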
        ctx->initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;
}

void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
        struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
        uint64_t va;

        r600_need_cs_space(ctx, 10, FALSE);

        va = r600_resource_va(&ctx->screen->b.b, (void*)fence_bo);
        va = va + (offset << 2);

        /* Use of WAIT_UNTIL is deprecated on Cayman+ */
        if (ctx->b.family >= CHIP_CAYMAN) {
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
        } else {
                r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
        }

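        /* EVENT_WRITE_EOP writes the fence value to memory once all prior
         * work has finished. DATA_SEL=1 selects a 32-bit data write and
         * INT_EN=0 requests no interrupt; the high address byte is masked
         * because GPU addresses are 40 bits. */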
        cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
        cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
        cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;       /* ADDRESS_LO */
        /* DATA_SEL | INT_EN | ADDRESS_HI */
        cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
        cs->buf[cs->cdw++] = value;                   /* DATA_LO */
        cs->buf[cs->cdw++] = 0;                       /* DATA_HI */
        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
        cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, fence_bo, RADEON_USAGE_WRITE);
}

/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
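/* (The BYTE_COUNT field is 21 bits wide; the limit is kept 8 bytes below
 * the field's maximum, presumably so the largest chunk stays 8-byte
 * aligned.) */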

void r600_cp_dma_copy_buffer(struct r600_context *rctx,
                             struct pipe_resource *dst, uint64_t dst_offset,
                             struct pipe_resource *src, uint64_t src_offset,
                             unsigned size)
{
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;

        assert(size);
        assert(rctx->screen->has_cp_dma);

        /* Mark the destination range as valid (initialized) now, while
         * dst_offset is still a buffer offset and size hasn't been consumed
         * by the copy loop below. */
        util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
                       dst_offset + size);

        dst_offset += r600_resource_va(&rctx->screen->b.b, dst);
        src_offset += r600_resource_va(&rctx->screen->b.b, src);

        /* Flush the caches where the resources are bound. */
        r600_flag_resource_cache_flush(rctx, src);
        r600_flag_resource_cache_flush(rctx, dst);
        rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;

        /* There are differences between R700 and EG in CP DMA,
         * but we only use the common bits here. */
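        /* Split the copy into CP_DMA_MAX_BYTE_COUNT-sized chunks. Only the
         * last chunk carries CP_SYNC, so the CP doesn't stall on the earlier
         * ones. */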
        while (size) {
                unsigned sync = 0;
                unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
                unsigned src_reloc, dst_reloc;

                r600_need_cs_space(rctx, 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);

                /* Flush the caches for the first copy only. */
                if (rctx->b.flags) {
                        r600_flush_emit(rctx);
                }

                /* Do the synchronization after the last copy, so that all data is written to memory. */
                if (size == byte_count) {
                        sync = PKT3_CP_DMA_CP_SYNC;
                }

                /* This must be done after r600_need_cs_space. */
                src_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
                dst_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);

                radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
                radeon_emit(cs, src_offset);                         /* SRC_ADDR_LO [31:0] */
                radeon_emit(cs, sync | ((src_offset >> 32) & 0xff)); /* CP_SYNC [31] | SRC_ADDR_HI [7:0] */
                radeon_emit(cs, dst_offset);                         /* DST_ADDR_LO [31:0] */
                radeon_emit(cs, (dst_offset >> 32) & 0xff);          /* DST_ADDR_HI [7:0] */
                radeon_emit(cs, byte_count);                         /* COMMAND [29:22] | BYTE_COUNT [20:0] */

                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
                radeon_emit(cs, src_reloc);
                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
                radeon_emit(cs, dst_reloc);

                size -= byte_count;
                src_offset += byte_count;
                dst_offset += byte_count;
        }

        /* Flush the cache of the dst resource again in case the 3D engine
         * has been prefetching it. */
        r600_flag_resource_cache_flush(rctx, dst);
}

void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw)
{
        /* The number of dwords we already used in the DMA CS so far. */
        num_dw += ctx->b.rings.dma.cs->cdw;
        /* Flush if there's not enough space. */
        if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
                ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
        }
}

void r600_dma_copy(struct r600_context *rctx,
                   struct pipe_resource *dst,
                   struct pipe_resource *src,
                   uint64_t dst_offset,
                   uint64_t src_offset,
                   uint64_t size)
{
        struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
        unsigned i, ncopy, csize, shift;
        struct r600_resource *rdst = (struct r600_resource*)dst;
        struct r600_resource *rsrc = (struct r600_resource*)src;

        /* Mark the destination range as valid (initialized) now, before the
         * offsets and size are consumed by the copy loop below. */
        util_range_add(&rdst->valid_buffer_range, dst_offset,
                       dst_offset + size);

        /* make sure the DMA ring is the only one active */
        rctx->b.rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);

        size >>= 2;
        shift = 2;
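        /* size is now in dwords; each DMA_PACKET_COPY moves at most 0xffff
         * dwords and takes 5 CS dwords (header, two address-low words, two
         * address-high words). */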
        ncopy = (size / 0xffff) + !!(size % 0xffff);

        r600_need_dma_space(rctx, ncopy * 5);
        for (i = 0; i < ncopy; i++) {
                csize = size < 0xffff ? size : 0xffff;
                /* emit the relocs before writing the cs so that the cs is always in a consistent state */
                r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rsrc, RADEON_USAGE_READ);
                r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rdst, RADEON_USAGE_WRITE);
                cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize);
                cs->buf[cs->cdw++] = dst_offset & 0xfffffffc;
                cs->buf[cs->cdw++] = src_offset & 0xfffffffc;
                cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
                cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
                dst_offset += csize << shift;
                src_offset += csize << shift;
                size -= csize;
        }
}

/* Flag the cache of the resource for it to be flushed later if the resource
 * is bound. Otherwise do nothing. Used for synchronization between engines.
 */
void r600_flag_resource_cache_flush(struct r600_context *rctx,
                                    struct pipe_resource *res)
{
        /* Check vertex buffers. */
        uint32_t mask = rctx->vertex_buffer_state.enabled_mask;
        while (mask) {
                uint32_t i = u_bit_scan(&mask);
                if (rctx->vertex_buffer_state.vb[i].buffer == res) {
                        rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
                }
        }

        /* Check vertex buffers for compute. */
        mask = rctx->cs_vertex_buffer_state.enabled_mask;
        while (mask) {
                uint32_t i = u_bit_scan(&mask);
                if (rctx->cs_vertex_buffer_state.vb[i].buffer == res) {
                        rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
                }
        }

        /* Check constant buffers. */
        unsigned shader;
        for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
                struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
                uint32_t mask = state->enabled_mask;

                while (mask) {
                        unsigned i = u_bit_scan(&mask);
                        if (state->cb[i].buffer == res) {
                                rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;

                                shader = PIPE_SHADER_TYPES; /* break the outer loop */
                                break;
                        }
                }
        }

        /* Check textures. */
        for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
                struct r600_samplerview_state *state = &rctx->samplers[shader].views;
                uint32_t mask = state->enabled_mask;

                while (mask) {
                        uint32_t i = u_bit_scan(&mask);
                        if (&state->views[i]->tex_resource->b.b == res) {
                                rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;

                                shader = PIPE_SHADER_TYPES; /* break the outer loop */
                                break;
                        }
                }
        }

        /* Check streamout buffers. */
        int i;
        for (i = 0; i < rctx->b.streamout.num_targets; i++) {
                if (rctx->b.streamout.targets[i]->b.buffer == res) {
                        rctx->b.flags |= R600_CONTEXT_STREAMOUT_FLUSH |
                                         R600_CONTEXT_FLUSH_AND_INV |
                                         R600_CONTEXT_WAIT_3D_IDLE;
                        break;
                }
        }

        /* Check colorbuffers. */
        for (i = 0; i < rctx->framebuffer.state.nr_cbufs; i++) {
                struct r600_texture *tex;

                if (rctx->framebuffer.state.cbufs[i] == NULL) {
                        continue;
                }

                tex = (struct r600_texture*)rctx->framebuffer.state.cbufs[i]->texture;

                if (rctx->framebuffer.state.cbufs[i]->texture == res) {
                        rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
                                         R600_CONTEXT_FLUSH_AND_INV |
                                         R600_CONTEXT_WAIT_3D_IDLE;

                        if (tex->cmask.size || tex->fmask.size) {
                                rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META;
                        }
                        break;
                }

                if (tex && tex->cmask_buffer && tex->cmask_buffer != &tex->resource && &tex->cmask_buffer->b.b == res) {
                        rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META |
                                         R600_CONTEXT_FLUSH_AND_INV |
                                         R600_CONTEXT_WAIT_3D_IDLE;
                }
        }

        /* Check a depth buffer. */
        if (rctx->framebuffer.state.zsbuf) {
                if (rctx->framebuffer.state.zsbuf->texture == res) {
                        rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB |
                                         R600_CONTEXT_FLUSH_AND_INV |
                                         R600_CONTEXT_WAIT_3D_IDLE;
                }

                struct r600_texture *tex =
                        (struct r600_texture*)rctx->framebuffer.state.zsbuf->texture;
                if (tex && tex->htile && &tex->htile->b.b == res) {
                        rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META |
                                         R600_CONTEXT_FLUSH_AND_INV |
                                         R600_CONTEXT_WAIT_3D_IDLE;
                }
        }
696 }