/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe.h"
#include "r600d.h" /* PKT3_*, EVENT_*, and register field macros used below */
#include "util/u_memory.h"
/* Get backends mask */
void r600_get_backend_mask(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;
	uint64_t va;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;
	va = r600_resource_va(&ctx->screen->screen, (void*)buffer);

	/* initialize buffer with zeroes */
	results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->ws->buffer_unmap(buffer->cs_buf);

		/* emit EVENT_WRITE for ZPASS_DONE */
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;

		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
		cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE);

		/* analyze results */
		results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1 << i);
			}
			ctx->ws->buffer_unmap(buffer->cs_buf);
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0)) >> (32 - num_backends);
	return;
}
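/*
 * Worked example for the backend_map decode above (illustrative values, not
 * real hardware state): on a pre-Evergreen part each map entry is 2 bits
 * (item_width = 2, item_mask = 0x3). With num_tile_pipes = 4 and
 * backend_map = 0xE4 (binary 11 10 01 00), the loop extracts backend indices
 * 0, 1, 2, 3 and accumulates mask = 0xF, so all four backends are considered
 * present.
 */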
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
			boolean count_draw_in)
{
	if (!ctx->ws->cs_memory_below_limit(ctx->rings.gfx.cs, ctx->vram, ctx->gtt)) {
		ctx->gtt = 0;
		ctx->vram = 0;
		ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
		return;
	}
	/* all will be accounted once relocations are emitted */
	ctx->gtt = 0;
	ctx->vram = 0;

	/* The number of dwords we already used in the CS so far. */
	num_dw += ctx->rings.gfx.cs->cdw;

	if (count_draw_in) {
		unsigned i;

		/* The number of dwords all the dirty states would take. */
		for (i = 0; i < R600_NUM_ATOMS; i++) {
			if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
				num_dw += ctx->atoms[i]->num_dw;
				if (ctx->screen->trace_bo) {
					num_dw += R600_TRACE_CS_DWORDS;
				}
			}
		}

		/* The upper-bound of how much space a draw command would take. */
		num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
		if (ctx->screen->trace_bo) {
			num_dw += R600_TRACE_CS_DWORDS;
		}
	}

	/* Count in queries_suspend. */
	num_dw += ctx->num_cs_dw_nontimer_queries_suspend;

	/* Count in streamout_end at the end of CS. */
	if (ctx->streamout.begin_emitted) {
		num_dw += ctx->streamout.num_dw_for_end;
	}

	/* Count in render_condition(NULL) at the end of CS. */
	if (ctx->predicate_drawing) {
		num_dw += 3;
	}

	/* SX_MISC */
	if (ctx->chip_class <= R700) {
		num_dw += 3;
	}

	/* Count in framebuffer cache flushes at the end of CS. */
	num_dw += R600_MAX_FLUSH_CS_DWORDS;

	/* The fence at the end of CS. */
	num_dw += 10;

	/* Flush if there's not enough space. */
	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
		ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
	}
}
void r600_flush_emit(struct r600_context *rctx)
{
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
	unsigned cp_coher_cntl = 0;
	unsigned wait_until = 0;
	unsigned emit_flush = 0;

	if (!rctx->flags) {
		return;
	}

	if (rctx->flags & R600_CONTEXT_WAIT_3D_IDLE) {
		wait_until |= S_008040_WAIT_3D_IDLE(1);
	}
	if (rctx->flags & R600_CONTEXT_WAIT_CP_DMA_IDLE) {
		wait_until |= S_008040_WAIT_CP_DMA_IDLE(1);
	}

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->family >= CHIP_CAYMAN) {
			/* emit a PS partial flush on Cayman/TN */
			rctx->flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		}
	}

	if (rctx->flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
	}

	if (rctx->chip_class >= R700 &&
	    (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0);
	}

	if (rctx->chip_class >= R700 &&
	    (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0);
	}

	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
		if (rctx->chip_class >= EVERGREEN) {
			cp_coher_cntl = S_0085F0_CB0_DEST_BASE_ENA(1) |
					S_0085F0_CB1_DEST_BASE_ENA(1) |
					S_0085F0_CB2_DEST_BASE_ENA(1) |
					S_0085F0_CB3_DEST_BASE_ENA(1) |
					S_0085F0_CB4_DEST_BASE_ENA(1) |
					S_0085F0_CB5_DEST_BASE_ENA(1) |
					S_0085F0_CB6_DEST_BASE_ENA(1) |
					S_0085F0_CB7_DEST_BASE_ENA(1) |
					S_0085F0_CB8_DEST_BASE_ENA(1) |
					S_0085F0_CB9_DEST_BASE_ENA(1) |
					S_0085F0_CB10_DEST_BASE_ENA(1) |
					S_0085F0_CB11_DEST_BASE_ENA(1) |
					S_0085F0_DB_DEST_BASE_ENA(1) |
					S_0085F0_TC_ACTION_ENA(1) |
					S_0085F0_CB_ACTION_ENA(1) |
					S_0085F0_DB_ACTION_ENA(1) |
					S_0085F0_SH_ACTION_ENA(1) |
					S_0085F0_SMX_ACTION_ENA(1) |
					S_0085F0_FULL_CACHE_ENA(1);
		} else {
			cp_coher_cntl = S_0085F0_SMX_ACTION_ENA(1) |
					S_0085F0_SH_ACTION_ENA(1) |
					S_0085F0_VC_ACTION_ENA(1) |
					S_0085F0_TC_ACTION_ENA(1) |
					S_0085F0_FULL_CACHE_ENA(1);
		}
		emit_flush = 1;
	}

	if (rctx->flags & R600_CONTEXT_INVAL_READ_CACHES) {
		cp_coher_cntl |= S_0085F0_VC_ACTION_ENA(1) |
				 S_0085F0_TC_ACTION_ENA(1) |
				 S_0085F0_FULL_CACHE_ENA(1);
		emit_flush = 1;
	}

	if (rctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) |
				 S_0085F0_SO1_DEST_BASE_ENA(1) |
				 S_0085F0_SO2_DEST_BASE_ENA(1) |
				 S_0085F0_SO3_DEST_BASE_ENA(1) |
				 S_0085F0_SMX_ACTION_ENA(1);
		emit_flush = 1;
	}

	if (emit_flush) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
		cs->buf[cs->cdw++] = cp_coher_cntl; /* CP_COHER_CNTL */
		cs->buf[cs->cdw++] = 0xffffffff;    /* CP_COHER_SIZE */
		cs->buf[cs->cdw++] = 0;             /* CP_COHER_BASE */
		cs->buf[cs->cdw++] = 0x0000000A;    /* POLL_INTERVAL */
	}

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->family < CHIP_CAYMAN) {
			/* wait for things to settle */
			r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
		}
	}

	/* everything is properly flushed */
	rctx->flags = 0;
}
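/*
 * Usage sketch, following the pattern of the callers in this file: nothing
 * emits cache-flush packets directly. A caller ORs request bits into
 * rctx->flags and lets r600_flush_emit() turn the accumulated state into one
 * packet sequence:
 *
 *	rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
 *	r600_flush_emit(rctx);
 */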
void r600_context_flush(struct r600_context *ctx, unsigned flags)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	if (cs->cdw == ctx->start_cs_cmd.num_dw)
		return;

	ctx->nontimer_queries_suspended = false;
	ctx->streamout.suspended = false;

	/* suspend queries */
	if (ctx->num_cs_dw_nontimer_queries_suspend) {
		r600_suspend_nontimer_queries(ctx);
		ctx->nontimer_queries_suspended = true;
	}

	if (ctx->streamout.begin_emitted) {
		r600_emit_streamout_end(ctx);
		ctx->streamout.suspended = true;
	}

	/* flush is needed to avoid lockups on some chips with user fences;
	 * this will also flush the framebuffer cache
	 */
	ctx->flags |= R600_CONTEXT_FLUSH_AND_INV |
		      R600_CONTEXT_FLUSH_AND_INV_CB_META |
		      R600_CONTEXT_FLUSH_AND_INV_DB_META |
		      R600_CONTEXT_WAIT_3D_IDLE |
		      R600_CONTEXT_WAIT_CP_DMA_IDLE;

	r600_flush_emit(ctx);

	/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
	if (ctx->chip_class <= R700) {
		r600_write_context_reg(cs, R_028350_SX_MISC, 0);
	}

	/* force to keep tiling flags */
	if (ctx->keep_tiling_flags) {
		flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
	}

	/* Flush the CS. */
	ctx->ws->cs_flush(ctx->rings.gfx.cs, flags, ctx->screen->cs_count++);
}
void r600_begin_new_cs(struct r600_context *ctx)
{
	unsigned shader;

	ctx->flags = 0;
	ctx->gtt = 0;
	ctx->vram = 0;

	/* Begin a new CS. */
	r600_emit_command_buffer(ctx->rings.gfx.cs, &ctx->start_cs_cmd);

	/* Re-emit states. */
	ctx->alphatest_state.atom.dirty = true;
	ctx->blend_color.atom.dirty = true;
	ctx->cb_misc_state.atom.dirty = true;
	ctx->clip_misc_state.atom.dirty = true;
	ctx->clip_state.atom.dirty = true;
	ctx->db_misc_state.atom.dirty = true;
	ctx->db_state.atom.dirty = true;
	ctx->framebuffer.atom.dirty = true;
	ctx->pixel_shader.atom.dirty = true;
	ctx->poly_offset_state.atom.dirty = true;
	ctx->vgt_state.atom.dirty = true;
	ctx->sample_mask.atom.dirty = true;
	ctx->scissor.atom.dirty = true;
	ctx->config_state.atom.dirty = true;
	ctx->stencil_ref.atom.dirty = true;
	ctx->vertex_fetch_shader.atom.dirty = true;
	ctx->vertex_shader.atom.dirty = true;
	ctx->viewport.atom.dirty = true;

	if (ctx->blend_state.cso)
		ctx->blend_state.atom.dirty = true;
	if (ctx->dsa_state.cso)
		ctx->dsa_state.atom.dirty = true;
	if (ctx->rasterizer_state.cso)
		ctx->rasterizer_state.atom.dirty = true;

	if (ctx->chip_class <= R700) {
		ctx->seamless_cube_map.atom.dirty = true;
	}

	ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
	r600_vertex_buffers_dirty(ctx);

	/* Re-emit shader resources. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
		struct r600_textures_info *samplers = &ctx->samplers[shader];

		constbuf->dirty_mask = constbuf->enabled_mask;
		samplers->views.dirty_mask = samplers->views.enabled_mask;
		samplers->states.dirty_mask = samplers->states.enabled_mask;

		r600_constant_buffers_dirty(ctx, constbuf);
		r600_sampler_views_dirty(ctx, &samplers->views);
		r600_sampler_states_dirty(ctx, &samplers->states);
	}

	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		r600_streamout_buffers_dirty(ctx);
	}

	/* resume queries */
	if (ctx->nontimer_queries_suspended) {
		r600_resume_nontimer_queries(ctx);
	}

	/* Re-emit the draw state. */
	ctx->last_primitive_type = -1;
	ctx->last_start_instance = -1;
}
void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint64_t va;

	r600_need_cs_space(ctx, 10, FALSE);

	va = r600_resource_va(&ctx->screen->screen, (void*)fence_bo);
	va = va + (offset << 2);

	/* Use of WAIT_UNTIL is deprecated on Cayman+ */
	if (ctx->family >= CHIP_CAYMAN) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
	} else {
		r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
	}

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
	cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* ADDRESS_LO */
	/* DATA_SEL | INT_EN | ADDRESS_HI */
	cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
	cs->buf[cs->cdw++] = value; /* DATA_LO */
	cs->buf[cs->cdw++] = 0;     /* DATA_HI */
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, fence_bo, RADEON_USAGE_WRITE);
}
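/*
 * How the fence above is consumed (a sketch, not code from this file):
 * EVENT_WRITE_EOP makes the CP write `value` into fence_bo at dword `offset`
 * once all prior rendering has drained, so a CPU-side waiter can poll the
 * mapped buffer, roughly:
 *
 *	while (fence_map[offset] != value)
 *		;  // hypothetical busy-wait; real code would yield or sleep
 */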
static void r600_flush_vgt_streamout(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	r600_write_config_reg(cs, R_008490_CP_STRMOUT_CNTL, 0);

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0);

	cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0);
	cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */
	cs->buf[cs->cdw++] = R_008490_CP_STRMOUT_CNTL >> 2; /* register */
	cs->buf[cs->cdw++] = 0;
	cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* reference value */
	cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* mask */
	cs->buf[cs->cdw++] = 4; /* poll interval */
}
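/*
 * Note on the sequence above: writing 0 to CP_STRMOUT_CNTL clears
 * OFFSET_UPDATE_DONE, the SO_VGTSTREAMOUT_FLUSH event asks the VGT to flush
 * streamout state, and WAIT_REG_MEM then stalls the CP, re-reading the
 * register (with a poll interval of 4) until (reg & mask) == reference,
 * i.e. until the hardware sets OFFSET_UPDATE_DONE.
 */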
static void r600_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	if (buffer_enable_bit) {
		r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(1));
		r600_write_context_reg(cs, R_028B20_VGT_STRMOUT_BUFFER_EN, buffer_enable_bit);
	} else {
		r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(0));
	}
}
void r600_emit_streamout_begin(struct r600_context *ctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_so_target **t = ctx->streamout.targets;
	unsigned *stride_in_dw = ctx->vs_shader->so.stride;
	unsigned i, update_flags = 0;
	uint64_t va;

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_flush_vgt_streamout(ctx);
		evergreen_set_streamout_enable(ctx, ctx->streamout.enabled_mask);
	} else {
		r600_flush_vgt_streamout(ctx);
		r600_set_streamout_enable(ctx, ctx->streamout.enabled_mask);
	}

	for (i = 0; i < ctx->streamout.num_targets; i++) {
		if (t[i]) {
			t[i]->stride_in_dw = stride_in_dw[i];

			va = r600_resource_va(&ctx->screen->screen,
					      (void*)t[i]->b.buffer);

			update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);

			r600_write_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 3);
			r600_write_value(cs, (t[i]->b.buffer_offset +
					      t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
			r600_write_value(cs, stride_in_dw[i]);              /* VTX_STRIDE (in DW) */
			r600_write_value(cs, va >> 8);                      /* BUFFER_BASE */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] =
				r600_context_bo_reloc(ctx, &ctx->rings.gfx, r600_resource(t[i]->b.buffer),
						      RADEON_USAGE_WRITE);

			/* R7xx requires this packet after updating BUFFER_BASE.
			 * Without this, R7xx locks up. */
			if (ctx->family >= CHIP_RS780 && ctx->family <= CHIP_RV740) {
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BASE_UPDATE, 1, 0);
				cs->buf[cs->cdw++] = i;
				cs->buf[cs->cdw++] = va >> 8;

				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] =
					r600_context_bo_reloc(ctx, &ctx->rings.gfx, r600_resource(t[i]->b.buffer),
							      RADEON_USAGE_WRITE);
			}

			if (ctx->streamout.append_bitmask & (1 << i)) {
				va = r600_resource_va(&ctx->screen->screen,
						      (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset;

				/* Append. */
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
				cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
						     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM); /* control */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* src address lo */
				cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */

				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] =
					r600_context_bo_reloc(ctx, &ctx->rings.gfx, t[i]->buf_filled_size,
							      RADEON_USAGE_READ);
			} else {
				/* Start from the beginning. */
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
				cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
						     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET); /* control */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = t[i]->b.buffer_offset >> 2; /* buffer offset in DW */
				cs->buf[cs->cdw++] = 0; /* unused */
			}
		}
	}

	if (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
		cs->buf[cs->cdw++] = update_flags;
	}
	ctx->streamout.begin_emitted = true;
}
void r600_emit_streamout_end(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_so_target **t = ctx->streamout.targets;
	unsigned i;
	uint64_t va;

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_flush_vgt_streamout(ctx);
	} else {
		r600_flush_vgt_streamout(ctx);
	}

	for (i = 0; i < ctx->streamout.num_targets; i++) {
		if (t[i]) {
			va = r600_resource_va(&ctx->screen->screen,
					      (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset;
			cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
			cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
					     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
					     STRMOUT_STORE_BUFFER_FILLED_SIZE; /* control */
			cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* dst address lo */
			cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* dst address hi */
			cs->buf[cs->cdw++] = 0; /* unused */
			cs->buf[cs->cdw++] = 0; /* unused */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] =
				r600_context_bo_reloc(ctx, &ctx->rings.gfx, t[i]->buf_filled_size,
						      RADEON_USAGE_WRITE);
		}
	}

	if (ctx->chip_class >= EVERGREEN) {
		ctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
		evergreen_set_streamout_enable(ctx, 0);
	} else {
		if (ctx->chip_class >= R700) {
			ctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
		}
		r600_set_streamout_enable(ctx, 0);
	}
	ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
	ctx->streamout.begin_emitted = false;
}
/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
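/*
 * Arithmetic note: BYTE_COUNT occupies bits [20:0] of the last CP_DMA dword,
 * so one packet can encode at most 2^21 - 1 bytes; the limit backs off to
 * (1 << 21) - 8 = 2097144 bytes, presumably to keep full-size chunks 8-byte
 * aligned.
 */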
void r600_cp_dma_copy_buffer(struct r600_context *rctx,
			     struct pipe_resource *dst, uint64_t dst_offset,
			     struct pipe_resource *src, uint64_t src_offset,
			     unsigned size)
{
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;

	assert(size);
	assert(rctx->screen->has_cp_dma);

	dst_offset += r600_resource_va(&rctx->screen->screen, dst);
	src_offset += r600_resource_va(&rctx->screen->screen, src);

	/* We flush the caches, because we might read from or write
	 * to resources which are bound right now. */
	rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES |
		       R600_CONTEXT_FLUSH_AND_INV |
		       R600_CONTEXT_FLUSH_AND_INV_CB_META |
		       R600_CONTEXT_FLUSH_AND_INV_DB_META |
		       R600_CONTEXT_STREAMOUT_FLUSH |
		       R600_CONTEXT_WAIT_3D_IDLE;

	/* There are differences between R700 and EG in CP DMA,
	 * but we only use the common bits here. */
	while (size) {
		unsigned sync = 0;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned src_reloc, dst_reloc;

		r600_need_cs_space(rctx, 10 + (rctx->flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);

		/* Flush the caches for the first copy only. */
		if (rctx->flags) {
			r600_flush_emit(rctx);
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count) {
			sync = PKT3_CP_DMA_CP_SYNC;
		}

		/* This must be done after r600_need_cs_space. */
		src_reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
		dst_reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);

		r600_write_value(cs, PKT3(PKT3_CP_DMA, 4, 0));
		r600_write_value(cs, src_offset);                         /* SRC_ADDR_LO [31:0] */
		r600_write_value(cs, sync | ((src_offset >> 32) & 0xff)); /* CP_SYNC [31] | SRC_ADDR_HI [7:0] */
		r600_write_value(cs, dst_offset);                         /* DST_ADDR_LO [31:0] */
		r600_write_value(cs, (dst_offset >> 32) & 0xff);          /* DST_ADDR_HI [7:0] */
		r600_write_value(cs, byte_count);                         /* COMMAND [29:22] | BYTE_COUNT [20:0] */

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
		r600_write_value(cs, src_reloc);
		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
		r600_write_value(cs, dst_reloc);

		size -= byte_count;
		src_offset += byte_count;
		dst_offset += byte_count;
	}

	/* Invalidate the read caches. */
	rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;

	util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
		       dst_offset + size);
}
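/*
 * Chunking example (illustrative numbers): a 5 MiB copy (5242880 bytes)
 * becomes three CP_DMA packets of 2097144 + 2097144 + 1048592 bytes. The
 * cache flush is emitted before the first packet only (r600_flush_emit()
 * clears rctx->flags), and PKT3_CP_DMA_CP_SYNC is set on the last packet
 * only, where size == byte_count.
 */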
void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw)
{
	/* The number of dwords we already used in the DMA so far. */
	num_dw += ctx->rings.dma.cs->cdw;
	/* Flush if there's not enough space. */
	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
		ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
	}
}
void r600_dma_copy(struct r600_context *rctx,
		struct pipe_resource *dst,
		struct pipe_resource *src,
		uint64_t dst_offset,
		uint64_t src_offset,
		uint64_t size)
{
	struct radeon_winsys_cs *cs = rctx->rings.dma.cs;
	unsigned i, ncopy, csize, shift;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;

	/* make sure that the DMA ring is the only one active */
	rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);

	size >>= 2; /* convert bytes to dwords */
	shift = 2;
	ncopy = (size / 0xffff) + !!(size % 0xffff);

	r600_need_dma_space(rctx, ncopy * 5);
	for (i = 0; i < ncopy; i++) {
		csize = size < 0xffff ? size : 0xffff;
		/* emit reloc before writing cs so that cs is always in consistent state */
		r600_context_bo_reloc(rctx, &rctx->rings.dma, rsrc, RADEON_USAGE_READ);
		r600_context_bo_reloc(rctx, &rctx->rings.dma, rdst, RADEON_USAGE_WRITE);
		cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize);
		cs->buf[cs->cdw++] = dst_offset & 0xfffffffc;
		cs->buf[cs->cdw++] = src_offset & 0xfffffffc;
		cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
		cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
		dst_offset += csize << shift;
		src_offset += csize << shift;
		size -= csize;
	}

	util_range_add(&rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);
}
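/*
 * Worked example for the chunking above (illustrative size): copying 1 MiB
 * gives size = 262144 dwords after "size >>= 2", so
 * ncopy = 262144 / 0xffff + !!(262144 % 0xffff) = 4 + 1 = 5 DMA packets;
 * each packet moves csize dwords (csize << shift bytes), capped at 0xffff
 * dwords.
 */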