/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"
/* Get backends mask */
void r600_get_backend_mask(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->b.info.r600_num_backends;
	unsigned i, mask = 0;
	uint64_t va;
	/* if backend_map query is supported by the kernel */
	if (ctx->screen->b.info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->b.info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->b.info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->b.chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while(num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}
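
	/* For illustration (not from the original source): on Evergreen each
	 * tile pipe owns a 4-bit entry masked with 0x7, so with two tile
	 * pipes and a backend_map of 0x10 the loop above decodes backends 0
	 * and 1 and ends up with mask = 0x3. Pre-Evergreen parts use 2-bit
	 * entries masked with 0x3. */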

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->b.b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;
	va = r600_resource_va(&ctx->screen->b.b, (void*)buffer);

	/* initialize buffer with zeroes */
	results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->b.ws->buffer_unmap(buffer->cs_buf);

		/* emit EVENT_WRITE for ZPASS_DONE */
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;

		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
		cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, buffer, RADEON_USAGE_WRITE);

		/* analyze results */
		results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for(i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
			ctx->b.ws->buffer_unmap(buffer->cs_buf);
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}
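
/* Worked example for the fallback above: with num_backends = 4,
 * (~(uint32_t)0) >> (32 - 4) = 0xf, i.e. the four lowest bits set, which
 * conservatively treats every possible backend as present. */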

void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
			boolean count_draw_in)
{
	if (!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
		ctx->b.gtt = 0;
		ctx->b.vram = 0;
		ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
		return;
	}
	/* all will be accounted for once the relocations are emitted */
	ctx->b.gtt = 0;
	ctx->b.vram = 0;

	/* The number of dwords we already used in the CS so far. */
	num_dw += ctx->b.rings.gfx.cs->cdw;

	if (count_draw_in) {
		unsigned i;

		/* The number of dwords all the dirty states would take. */
		for (i = 0; i < R600_NUM_ATOMS; i++) {
			if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
				num_dw += ctx->atoms[i]->num_dw;
				if (ctx->screen->trace_bo) {
					num_dw += R600_TRACE_CS_DWORDS;
				}
			}
		}

		/* The upper-bound of how much space a draw command would take. */
		num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
		if (ctx->screen->trace_bo) {
			num_dw += R600_TRACE_CS_DWORDS;
		}
	}

	/* Count in queries_suspend. */
	num_dw += ctx->num_cs_dw_nontimer_queries_suspend;

	/* Count in streamout_end at the end of CS. */
	if (ctx->b.streamout.begin_emitted) {
		num_dw += ctx->b.streamout.num_dw_for_end;
	}

	/* Count in render_condition(NULL) at the end of CS. */
	if (ctx->predicate_drawing) {
		num_dw += 3;
	}

	/* SX_MISC */
	if (ctx->b.chip_class <= R700) {
		num_dw += 3;
	}

	/* Count in framebuffer cache flushes at the end of CS. */
	num_dw += R600_MAX_FLUSH_CS_DWORDS;

	/* The fence at the end of CS. */
	num_dw += 10;

	/* Flush if there's not enough space. */
	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
		ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
	}
}
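
/* Note on the accounting above: the estimate is an upper bound, not an exact
 * count. Even with every atom clean, a draw still reserves
 * R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS plus the end-of-CS items
 * (streamout end, SX_MISC, the 10-dword fence), so a draw call is never split
 * across two command streams: either it fits or the CS is flushed before any
 * of it is emitted. */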

void r600_flush_emit(struct r600_context *rctx)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	unsigned cp_coher_cntl = 0;
	unsigned wait_until = 0;

	if (!rctx->b.flags) {
		return;
	}

	if (rctx->b.flags & R600_CONTEXT_WAIT_3D_IDLE) {
		wait_until |= S_008040_WAIT_3D_IDLE(1);
	}
	if (rctx->b.flags & R600_CONTEXT_WAIT_CP_DMA_IDLE) {
		wait_until |= S_008040_WAIT_CP_DMA_IDLE(1);
	}

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->b.family >= CHIP_CAYMAN) {
			/* emit a PS partial flush on Cayman/TN */
			rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		}
	}

	if (rctx->b.flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
	}

	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0);
	}

	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0);

		/* Set FULL_CACHE_ENA for DB META flushes on r7xx and later.
		 *
		 * This hack predates use of FLUSH_AND_INV_DB_META, so it's
		 * unclear whether it's still needed or even whether it has
		 * any effect.
		 */
		cp_coher_cntl |= S_0085F0_FULL_CACHE_ENA(1);
	}

	if (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
	}

	if (rctx->b.flags & R600_CONTEXT_INV_CONST_CACHE) {
		cp_coher_cntl |= S_0085F0_SH_ACTION_ENA(1);
	}
	if (rctx->b.flags & R600_CONTEXT_INV_VERTEX_CACHE) {
		cp_coher_cntl |= rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
							: S_0085F0_TC_ACTION_ENA(1);
	}
	if (rctx->b.flags & R600_CONTEXT_INV_TEX_CACHE) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
	}

	/* Don't use the DB CP COHER logic on r6xx.
	 * There are hw bugs.
	 */
	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB)) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				S_0085F0_DB_DEST_BASE_ENA(1) |
				S_0085F0_SMX_ACTION_ENA(1);
	}

	/* Don't use the CB CP COHER logic on r6xx.
	 * There are hw bugs.
	 */
	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB)) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				S_0085F0_CB0_DEST_BASE_ENA(1) |
				S_0085F0_CB1_DEST_BASE_ENA(1) |
				S_0085F0_CB2_DEST_BASE_ENA(1) |
				S_0085F0_CB3_DEST_BASE_ENA(1) |
				S_0085F0_CB4_DEST_BASE_ENA(1) |
				S_0085F0_CB5_DEST_BASE_ENA(1) |
				S_0085F0_CB6_DEST_BASE_ENA(1) |
				S_0085F0_CB7_DEST_BASE_ENA(1) |
				S_0085F0_SMX_ACTION_ENA(1);
		if (rctx->b.chip_class >= EVERGREEN)
			cp_coher_cntl |= S_0085F0_CB8_DEST_BASE_ENA(1) |
					S_0085F0_CB9_DEST_BASE_ENA(1) |
					S_0085F0_CB10_DEST_BASE_ENA(1) |
					S_0085F0_CB11_DEST_BASE_ENA(1);
	}

	if (rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) |
				S_0085F0_SO1_DEST_BASE_ENA(1) |
				S_0085F0_SO2_DEST_BASE_ENA(1) |
				S_0085F0_SO3_DEST_BASE_ENA(1) |
				S_0085F0_SMX_ACTION_ENA(1);
	}

	if (cp_coher_cntl) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
		cs->buf[cs->cdw++] = cp_coher_cntl;	/* CP_COHER_CNTL */
		cs->buf[cs->cdw++] = 0xffffffff;	/* CP_COHER_SIZE */
		cs->buf[cs->cdw++] = 0;			/* CP_COHER_BASE */
		cs->buf[cs->cdw++] = 0x0000000A;	/* POLL_INTERVAL */
	}
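
	/* The SURFACE_SYNC emitted above carries four operands: CP_COHER_CNTL
	 * selects which caches/blocks to flush, CP_COHER_SIZE/CP_COHER_BASE
	 * bound the affected range (0xffffffff with base 0 presumably covers
	 * the whole address space), and the last dword is the interval the CP
	 * waits between coherency polls. */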

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->b.family < CHIP_CAYMAN) {
			/* wait for things to settle */
			r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
		}
	}

	/* everything is properly flushed */
	rctx->b.flags = 0;
}

void r600_context_flush(struct r600_context *ctx, unsigned flags)
{
	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;

	ctx->nontimer_queries_suspended = false;
	ctx->b.streamout.suspended = false;

	/* suspend queries */
	if (ctx->num_cs_dw_nontimer_queries_suspend) {
		r600_suspend_nontimer_queries(ctx);
		ctx->nontimer_queries_suspended = true;
	}

	if (ctx->b.streamout.begin_emitted) {
		r600_emit_streamout_end(&ctx->b);
		ctx->b.streamout.suspended = true;
	}

	/* flush is needed to avoid lockups on some chips with user fences;
	 * this will also flush the framebuffer cache
	 */
	ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV |
			R600_CONTEXT_FLUSH_AND_INV_CB |
			R600_CONTEXT_FLUSH_AND_INV_DB |
			R600_CONTEXT_FLUSH_AND_INV_CB_META |
			R600_CONTEXT_FLUSH_AND_INV_DB_META |
			R600_CONTEXT_WAIT_3D_IDLE |
			R600_CONTEXT_WAIT_CP_DMA_IDLE;

	r600_flush_emit(ctx);

	/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
	if (ctx->b.chip_class <= R700) {
		r600_write_context_reg(cs, R_028350_SX_MISC, 0);
	}

	/* force to keep tiling flags */
	if (ctx->keep_tiling_flags) {
		flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
	}

	/* Flush the CS. */
	ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, ctx->screen->cs_count++);
}

void r600_begin_new_cs(struct r600_context *ctx)
{
	unsigned shader;

	ctx->b.flags = 0;
	ctx->b.gtt = 0;
	ctx->b.vram = 0;

	/* Begin a new CS. */
	r600_emit_command_buffer(ctx->b.rings.gfx.cs, &ctx->start_cs_cmd);

	/* Re-emit states. */
	ctx->alphatest_state.atom.dirty = true;
	ctx->blend_color.atom.dirty = true;
	ctx->cb_misc_state.atom.dirty = true;
	ctx->clip_misc_state.atom.dirty = true;
	ctx->clip_state.atom.dirty = true;
	ctx->db_misc_state.atom.dirty = true;
	ctx->db_state.atom.dirty = true;
	ctx->framebuffer.atom.dirty = true;
	ctx->pixel_shader.atom.dirty = true;
	ctx->poly_offset_state.atom.dirty = true;
	ctx->vgt_state.atom.dirty = true;
	ctx->sample_mask.atom.dirty = true;
	ctx->scissor.atom.dirty = true;
	ctx->config_state.atom.dirty = true;
	ctx->stencil_ref.atom.dirty = true;
	ctx->vertex_fetch_shader.atom.dirty = true;
	ctx->vertex_shader.atom.dirty = true;
	ctx->viewport.atom.dirty = true;

	if (ctx->blend_state.cso)
		ctx->blend_state.atom.dirty = true;
	if (ctx->dsa_state.cso)
		ctx->dsa_state.atom.dirty = true;
	if (ctx->rasterizer_state.cso)
		ctx->rasterizer_state.atom.dirty = true;

	if (ctx->b.chip_class <= R700) {
		ctx->seamless_cube_map.atom.dirty = true;
	}

	ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
	r600_vertex_buffers_dirty(ctx);

	/* Re-emit shader resources. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
		struct r600_textures_info *samplers = &ctx->samplers[shader];

		constbuf->dirty_mask = constbuf->enabled_mask;
		samplers->views.dirty_mask = samplers->views.enabled_mask;
		samplers->states.dirty_mask = samplers->states.enabled_mask;

		r600_constant_buffers_dirty(ctx, constbuf);
		r600_sampler_views_dirty(ctx, &samplers->views);
		r600_sampler_states_dirty(ctx, &samplers->states);
	}

	if (ctx->b.streamout.suspended) {
		ctx->b.streamout.append_bitmask = ctx->b.streamout.enabled_mask;
		r600_streamout_buffers_dirty(&ctx->b);
	}

	if (ctx->nontimer_queries_suspended) {
		r600_resume_nontimer_queries(ctx);
	}

	/* Re-emit the draw state. */
	ctx->last_primitive_type = -1;
	ctx->last_start_instance = -1;

	ctx->initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;
}
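
/* A fresh CS starts with no state on the GPU side, which is why everything
 * above is re-dirtied wholesale; the first draw afterwards re-emits every
 * atom. This has to stay consistent with r600_need_cs_space, which sums
 * atom->num_dw for dirty atoms when checking whether a draw still fits. */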

void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
	uint64_t va;

	r600_need_cs_space(ctx, 10, FALSE);

	va = r600_resource_va(&ctx->screen->b.b, (void*)fence_bo);
	va = va + (offset << 2);

	/* Use of WAIT_UNTIL is deprecated on Cayman+ */
	if (ctx->b.family >= CHIP_CAYMAN) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
	} else {
		r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
	}

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
	cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;	/* ADDRESS_LO */
	/* DATA_SEL | INT_EN | ADDRESS_HI */
	cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
	cs->buf[cs->cdw++] = value;	/* DATA_LO */
	cs->buf[cs->cdw++] = 0;		/* DATA_HI */
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, fence_bo, RADEON_USAGE_WRITE);
}
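
/* How the fence above works: EVENT_WRITE_EOP makes the GPU write DATA_LO to
 * the given address once all prior work has drained. In the packet, (1 << 29)
 * selects 32-bit data (DATA_SEL = 1) and (0 << 24) leaves the fence interrupt
 * disabled, so a waiter is expected to poll the fence dword from the CPU
 * rather than sleep on an IRQ. */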

/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
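
/* The CP DMA byte count field is 21 bits, hence the (1 << 21) bound; the 8
 * subtracted here presumably keeps the largest chunk 8-byte aligned. */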

void r600_cp_dma_copy_buffer(struct r600_context *rctx,
			     struct pipe_resource *dst, uint64_t dst_offset,
			     struct pipe_resource *src, uint64_t src_offset,
			     unsigned size)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;

	assert(size);
	assert(rctx->screen->has_cp_dma);

	dst_offset += r600_resource_va(&rctx->screen->b.b, dst);
	src_offset += r600_resource_va(&rctx->screen->b.b, src);

	/* Flush the caches where the resources are bound. */
	r600_flag_resource_cache_flush(rctx, src);
	r600_flag_resource_cache_flush(rctx, dst);
	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;

	/* There are differences between R700 and EG in CP DMA,
	 * but we only use the common bits here. */
	while (size) {
		unsigned sync = 0;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned src_reloc, dst_reloc;

		r600_need_cs_space(rctx, 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);

		/* Flush the caches for the first copy only. */
		if (rctx->b.flags) {
			r600_flush_emit(rctx);
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count) {
			sync = PKT3_CP_DMA_CP_SYNC;
		}

		/* This must be done after r600_need_cs_space. */
		src_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
		dst_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);

		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_offset);	/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, sync | ((src_offset >> 32) & 0xff));	/* CP_SYNC [31] | SRC_ADDR_HI [7:0] */
		radeon_emit(cs, dst_offset);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_offset >> 32) & 0xff);	/* DST_ADDR_HI [7:0] */
		radeon_emit(cs, byte_count);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, src_reloc);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, dst_reloc);

		size -= byte_count;
		src_offset += byte_count;
		dst_offset += byte_count;
	}

	/* Flush the cache of the dst resource again in case the 3D engine
	 * has been prefetching it. */
	r600_flag_resource_cache_flush(rctx, dst);

	util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
		       dst_offset + size);
}
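
/* Worked example of the chunking above: a 5 MiB copy (5242880 bytes) with
 * CP_DMA_MAX_BYTE_COUNT = (1 << 21) - 8 = 2097144 is split into three packets
 * of 2097144, 2097144 and 1048592 bytes. Only the first iteration emits the
 * pending cache flushes, and only the last one sets PKT3_CP_DMA_CP_SYNC, so
 * synchronization happens after all the data has been written. */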

void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw)
{
	/* The number of dwords we already used in the DMA so far. */
	num_dw += ctx->b.rings.dma.cs->cdw;
	/* Flush if there's not enough space. */
	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
		ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
	}
}

void r600_dma_copy(struct r600_context *rctx,
		struct pipe_resource *dst,
		struct pipe_resource *src,
		uint64_t dst_offset,
		uint64_t src_offset,
		uint64_t size)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
	unsigned i, ncopy, csize, shift;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;

	/* make sure that the dma ring is the only one active */
	rctx->b.rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);

	size >>= 2;
	shift = 2;
	ncopy = (size / 0xffff) + !!(size % 0xffff);

	r600_need_dma_space(rctx, ncopy * 5);
	for (i = 0; i < ncopy; i++) {
		csize = size < 0xffff ? size : 0xffff;
		/* emit reloc before writing cs so that cs is always in a consistent state */
		r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rsrc, RADEON_USAGE_READ);
		r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rdst, RADEON_USAGE_WRITE);
		cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize);
		cs->buf[cs->cdw++] = dst_offset & 0xfffffffc;
		cs->buf[cs->cdw++] = src_offset & 0xfffffffc;
		cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
		cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
		dst_offset += csize << shift;
		src_offset += csize << shift;
		size -= csize;
	}

	util_range_add(&rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);
}
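
/* Worked example for the split above: size is shifted down to dwords before
 * the loop, and each DMA_PACKET_COPY moves at most 0xffff dwords. A 1 MiB
 * copy is 262144 dwords, so ncopy = 262144 / 0xffff + 1 = 5 packets, and
 * r600_need_dma_space reserves ncopy * 5 = 25 dwords for them. */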

/* Flag the cache of the resource for it to be flushed later if the resource
 * is bound. Otherwise do nothing. Used for synchronization between engines.
 */
void r600_flag_resource_cache_flush(struct r600_context *rctx,
				    struct pipe_resource *res)
{
	/* Check vertex buffers. */
	uint32_t mask = rctx->vertex_buffer_state.enabled_mask;
	while (mask) {
		uint32_t i = u_bit_scan(&mask);
		if (rctx->vertex_buffer_state.vb[i].buffer == res) {
			rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
		}
	}

	/* Check vertex buffers for compute. */
	mask = rctx->cs_vertex_buffer_state.enabled_mask;
	while (mask) {
		uint32_t i = u_bit_scan(&mask);
		if (rctx->cs_vertex_buffer_state.vb[i].buffer == res) {
			rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
		}
	}

	/* Check constant buffers. */
	unsigned shader;
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->cb[i].buffer == res) {
				rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;

				shader = PIPE_SHADER_TYPES; /* break the outer loop */
				break;
			}
		}
	}

	/* Check textures. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_samplerview_state *state = &rctx->samplers[shader].views;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			uint32_t i = u_bit_scan(&mask);
			if (&state->views[i]->tex_resource->b.b == res) {
				rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;

				shader = PIPE_SHADER_TYPES; /* break the outer loop */
				break;
			}
		}
	}

	/* Check streamout buffers. */
	unsigned i;
	for (i = 0; i < rctx->b.streamout.num_targets; i++) {
		if (rctx->b.streamout.targets[i]->b.buffer == res) {
			rctx->b.flags |= R600_CONTEXT_STREAMOUT_FLUSH |
					 R600_CONTEXT_FLUSH_AND_INV |
					 R600_CONTEXT_WAIT_3D_IDLE;
		}
	}

	/* Check colorbuffers. */
	for (i = 0; i < rctx->framebuffer.state.nr_cbufs; i++) {
		if (rctx->framebuffer.state.cbufs[i] &&
		    rctx->framebuffer.state.cbufs[i]->texture == res) {
			struct r600_texture *tex =
				(struct r600_texture*)rctx->framebuffer.state.cbufs[i]->texture;

			rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
					 R600_CONTEXT_FLUSH_AND_INV |
					 R600_CONTEXT_WAIT_3D_IDLE;

			if (tex->cmask_size || tex->fmask_size) {
				rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META;
			}
		}
	}

	/* Check a depth buffer. */
	if (rctx->framebuffer.state.zsbuf) {
		if (rctx->framebuffer.state.zsbuf->texture == res) {
			rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB |
					 R600_CONTEXT_FLUSH_AND_INV |
					 R600_CONTEXT_WAIT_3D_IDLE;
		}

		struct r600_texture *tex =
			(struct r600_texture*)rctx->framebuffer.state.zsbuf->texture;
		if (tex && tex->htile && &tex->htile->b.b == res) {
			rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META |
					 R600_CONTEXT_FLUSH_AND_INV |
					 R600_CONTEXT_WAIT_3D_IDLE;
		}
	}
}
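
/* A sketch of the expected usage (hypothetical, mirroring what
 * r600_cp_dma_copy_buffer does above):
 *
 *	r600_flag_resource_cache_flush(rctx, src);
 *	r600_flag_resource_cache_flush(rctx, dst);
 *	... emit the copy on the other engine ...
 *	r600_flag_resource_cache_flush(rctx, dst);
 *
 * i.e. flush whatever caches may hold stale source/destination data before
 * the copy, and flag the destination again afterwards in case the 3D engine
 * prefetched it. */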