/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"

void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
			boolean count_draw_in)
{
	if (!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
		ctx->b.gtt = 0;
		ctx->b.vram = 0;
		ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
		return;
	}
	/* All of this will be accounted for once the relocations are emitted. */
	ctx->b.gtt = 0;
	ctx->b.vram = 0;

	/* The number of dwords we already used in the CS so far. */
	num_dw += ctx->b.rings.gfx.cs->cdw;

	if (count_draw_in) {
		unsigned i;

		/* The number of dwords all the dirty states would take. */
		for (i = 0; i < R600_NUM_ATOMS; i++) {
			if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
				num_dw += ctx->atoms[i]->num_dw;
				if (ctx->screen->b.trace_bo) {
					num_dw += R600_TRACE_CS_DWORDS;
				}
			}
		}

		/* The upper bound of how much space a draw command would take. */
		num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
		if (ctx->screen->b.trace_bo) {
			num_dw += R600_TRACE_CS_DWORDS;
		}
	}

	/* Count in queries_suspend. */
	num_dw += ctx->b.num_cs_dw_nontimer_queries_suspend +
		  ctx->b.num_cs_dw_timer_queries_suspend;

	/* Count in streamout_end at the end of CS. */
	if (ctx->b.streamout.begin_emitted) {
		num_dw += ctx->b.streamout.num_dw_for_end;
	}

	/* Count in render_condition(NULL) at the end of CS. */
	if (ctx->b.predicate_drawing) {
		num_dw += 3;
	}

	/* Count in the SX_MISC reset on r6xx. */
	if (ctx->b.chip_class == R600) {
		num_dw += 3;
	}

	/* Count in framebuffer cache flushes at the end of CS. */
	num_dw += R600_MAX_FLUSH_CS_DWORDS;

	/* The fence at the end of CS. */
	num_dw += 10;

	/* Flush if there's not enough space. */
	if (num_dw > ctx->b.rings.gfx.cs->max_dw) {
		ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
	}
}
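
/*
 * Usage note (an illustrative sketch, not a call site from this file): a
 * caller that is about to write N dwords into the CS by hand reserves the
 * space first, e.g.
 *
 *	r600_need_cs_space(rctx, 16, FALSE);
 *	...emit up to 16 dwords...
 *
 * The draw path instead passes count_draw_in = TRUE, so that all dirty
 * state atoms and the worst-case draw packet are accounted for by a
 * single check.
 */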

void r600_flush_emit(struct r600_context *rctx)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	unsigned cp_coher_cntl = 0;
	unsigned wait_until = 0;

	if (!rctx->b.flags) {
		return;
	}

	if (rctx->b.flags & R600_CONTEXT_WAIT_3D_IDLE) {
		wait_until |= S_008040_WAIT_3D_IDLE(1);
	}
	if (rctx->b.flags & R600_CONTEXT_WAIT_CP_DMA_IDLE) {
		wait_until |= S_008040_WAIT_CP_DMA_IDLE(1);
	}

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->b.family >= CHIP_CAYMAN) {
			/* emit a PS partial flush on Cayman/TN instead */
			rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		}
	}

	if (rctx->b.flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
	}

	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0);
	}

	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0);

		/* Set FULL_CACHE_ENA for DB META flushes on r7xx and later.
		 *
		 * This hack predates the use of FLUSH_AND_INV_DB_META, so it's
		 * unclear whether it's still needed or even whether it has
		 * any effect. */
		cp_coher_cntl |= S_0085F0_FULL_CACHE_ENA(1);
	}

	if (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV ||
	    (rctx->b.chip_class == R600 && rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
	}

	if (rctx->b.flags & R600_CONTEXT_INV_CONST_CACHE) {
		/* Direct constant addressing uses the shader cache.
		 * Indirect constant addressing uses the vertex cache. */
		cp_coher_cntl |= S_0085F0_SH_ACTION_ENA(1) |
				 (rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
							 : S_0085F0_TC_ACTION_ENA(1));
	}
	if (rctx->b.flags & R600_CONTEXT_INV_VERTEX_CACHE) {
		cp_coher_cntl |= rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
							: S_0085F0_TC_ACTION_ENA(1);
	}
	if (rctx->b.flags & R600_CONTEXT_INV_TEX_CACHE) {
		/* Textures use the texture cache.
		 * Texture buffer objects use the vertex cache. */
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
				 (rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1) : 0);
	}

	/* Don't use the DB CP COHER logic on r6xx.
	 * There are hw bugs. */
	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB)) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1) |
				 S_0085F0_SMX_ACTION_ENA(1);
	}

	/* Don't use the CB CP COHER logic on r6xx.
	 * There are hw bugs. */
	if (rctx->b.chip_class >= R700 &&
	    (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB)) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1) |
				 S_0085F0_SMX_ACTION_ENA(1);
		if (rctx->b.chip_class >= EVERGREEN)
			cp_coher_cntl |= S_0085F0_CB8_DEST_BASE_ENA(1) |
					 S_0085F0_CB9_DEST_BASE_ENA(1) |
					 S_0085F0_CB10_DEST_BASE_ENA(1) |
					 S_0085F0_CB11_DEST_BASE_ENA(1);
	}

	if (rctx->b.chip_class >= R700 &&
	    rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) |
				 S_0085F0_SO1_DEST_BASE_ENA(1) |
				 S_0085F0_SO2_DEST_BASE_ENA(1) |
				 S_0085F0_SO3_DEST_BASE_ENA(1) |
				 S_0085F0_SMX_ACTION_ENA(1);
	}

	/* Workaround for buggy flushing on some R6xx chipsets. */
	if ((rctx->b.flags & (R600_CONTEXT_FLUSH_AND_INV |
			      R600_CONTEXT_STREAMOUT_FLUSH)) &&
	    (rctx->b.family == CHIP_RV670 ||
	     rctx->b.family == CHIP_RS780 ||
	     rctx->b.family == CHIP_RS880)) {
		cp_coher_cntl |= S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_DEST_BASE_0_ENA(1);
	}

	if (cp_coher_cntl) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
		cs->buf[cs->cdw++] = cp_coher_cntl;	/* CP_COHER_CNTL */
		cs->buf[cs->cdw++] = 0xffffffff;	/* CP_COHER_SIZE */
		cs->buf[cs->cdw++] = 0;			/* CP_COHER_BASE */
		cs->buf[cs->cdw++] = 0x0000000A;	/* POLL_INTERVAL */
	}
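
	/* Note on the SURFACE_SYNC packet above: CP_COHER_SIZE = 0xffffffff
	 * together with CP_COHER_BASE = 0 makes the sync cover the whole
	 * address range, so every cache selected in CP_COHER_CNTL is flushed
	 * or invalidated regardless of which surfaces are currently bound. */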

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->b.family < CHIP_CAYMAN) {
			/* wait for things to settle */
			r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
		}
	}

	/* Everything is now properly flushed; clear the pending flags. */
	rctx->b.flags = 0;
}

void r600_context_gfx_flush(void *context, unsigned flags,
			    struct pipe_fence_handle **fence)
{
	struct r600_context *ctx = context;
	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;

	/* An empty CS needs no flush, unless the caller wants a fence back. */
	if (cs->cdw == ctx->b.initial_gfx_cs_size && !fence)
		return;

	ctx->b.rings.gfx.flushing = true;

	r600_preflush_suspend_features(&ctx->b);

	/* flush the framebuffer cache */
	ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV |
			R600_CONTEXT_FLUSH_AND_INV_CB |
			R600_CONTEXT_FLUSH_AND_INV_DB |
			R600_CONTEXT_FLUSH_AND_INV_CB_META |
			R600_CONTEXT_FLUSH_AND_INV_DB_META |
			R600_CONTEXT_WAIT_3D_IDLE |
			R600_CONTEXT_WAIT_CP_DMA_IDLE;

	r600_flush_emit(ctx);

	/* Old kernels and userspace don't set SX_MISC, so we must reset it to 0 here. */
	if (ctx->b.chip_class == R600) {
		r600_write_context_reg(cs, R_028350_SX_MISC, 0);
	}

	/* force to keep tiling flags */
	if (ctx->keep_tiling_flags) {
		flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
	}

	ctx->b.ws->cs_flush(cs, flags, fence, ctx->screen->b.cs_count++);
	ctx->b.rings.gfx.flushing = false;

	r600_begin_new_cs(ctx);
}

void r600_begin_new_cs(struct r600_context *ctx)
{
	unsigned shader;
	int i;

	ctx->b.flags = 0;
	ctx->b.gtt = 0;
	ctx->b.vram = 0;

	/* Begin a new CS. */
	r600_emit_command_buffer(ctx->b.rings.gfx.cs, &ctx->start_cs_cmd);

	/* Re-emit states. */
	ctx->alphatest_state.atom.dirty = true;
	ctx->blend_color.atom.dirty = true;
	ctx->cb_misc_state.atom.dirty = true;
	ctx->clip_misc_state.atom.dirty = true;
	ctx->clip_state.atom.dirty = true;
	ctx->db_misc_state.atom.dirty = true;
	ctx->db_state.atom.dirty = true;
	ctx->framebuffer.atom.dirty = true;
	ctx->pixel_shader.atom.dirty = true;
	ctx->poly_offset_state.atom.dirty = true;
	ctx->vgt_state.atom.dirty = true;
	ctx->sample_mask.atom.dirty = true;
	for (i = 0; i < R600_MAX_VIEWPORTS; i++) {
		ctx->scissor[i].atom.dirty = true;
		ctx->viewport[i].atom.dirty = true;
	}
	ctx->config_state.atom.dirty = true;
	ctx->stencil_ref.atom.dirty = true;
	ctx->vertex_fetch_shader.atom.dirty = true;
	ctx->export_shader.atom.dirty = true;
	ctx->shader_stages.atom.dirty = true;
	if (ctx->gs_shader) {
		ctx->geometry_shader.atom.dirty = true;
		ctx->gs_rings.atom.dirty = true;
	}
	ctx->vertex_shader.atom.dirty = true;
	ctx->b.streamout.enable_atom.dirty = true;

	if (ctx->blend_state.cso)
		ctx->blend_state.atom.dirty = true;
	if (ctx->dsa_state.cso)
		ctx->dsa_state.atom.dirty = true;
	if (ctx->rasterizer_state.cso)
		ctx->rasterizer_state.atom.dirty = true;

	if (ctx->b.chip_class <= R700) {
		ctx->seamless_cube_map.atom.dirty = true;
	}

	ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
	r600_vertex_buffers_dirty(ctx);

	/* Re-emit shader resources. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
		struct r600_textures_info *samplers = &ctx->samplers[shader];

		constbuf->dirty_mask = constbuf->enabled_mask;
		samplers->views.dirty_mask = samplers->views.enabled_mask;
		samplers->states.dirty_mask = samplers->states.enabled_mask;

		r600_constant_buffers_dirty(ctx, constbuf);
		r600_sampler_views_dirty(ctx, &samplers->views);
		r600_sampler_states_dirty(ctx, &samplers->states);
	}

	r600_postflush_resume_features(&ctx->b);

	/* Re-emit the draw state. */
	ctx->last_primitive_type = -1;
	ctx->last_start_instance = -1;

	ctx->b.initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;
}

/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
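
/* The CP DMA BYTE_COUNT field is 21 bits wide ([20:0] of the COMMAND dword
 * emitted below), which allows just under 2 MiB per packet; the -8 presumably
 * keeps the count safely below the field's maximum. For example, a 5 MiB copy
 * (5242880 bytes) is split by the loop below into
 * ceil(5242880 / 2097144) = 3 packets. */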

void r600_cp_dma_copy_buffer(struct r600_context *rctx,
			     struct pipe_resource *dst, uint64_t dst_offset,
			     struct pipe_resource *src, uint64_t src_offset,
			     unsigned size)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;

	assert(rctx->screen->b.has_cp_dma);

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += r600_resource(dst)->gpu_address;
	src_offset += r600_resource(src)->gpu_address;

	/* Flush the caches where the resources are bound. */
	rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_INV_VERTEX_CACHE |
			 R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META |
			 R600_CONTEXT_STREAMOUT_FLUSH |
			 R600_CONTEXT_WAIT_3D_IDLE;

	/* There are differences between R700 and EG in CP DMA,
	 * but we only use the common bits here. */
	while (size) {
		unsigned sync = 0;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned src_reloc, dst_reloc;

		r600_need_cs_space(rctx, 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);
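		/* The 10 reserved dwords cover exactly what one iteration
		 * emits below: the 6-dword CP_DMA packet plus two 2-dword
		 * NOP relocations. */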

		/* Flush the caches for the first copy only. */
		if (rctx->b.flags) {
			r600_flush_emit(rctx);
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count) {
			sync = PKT3_CP_DMA_CP_SYNC;
		}
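		/* Our understanding of CP_SYNC: it makes the CP stall further
		 * packet processing until this DMA completes, so setting it
		 * only on the final packet suffices because the copies are
		 * processed in order. */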

		/* This must be done after r600_need_cs_space. */
		src_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)src,
						  RADEON_USAGE_READ, RADEON_PRIO_MIN);
		dst_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)dst,
						  RADEON_USAGE_WRITE, RADEON_PRIO_MIN);

		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_offset);			/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, sync | ((src_offset >> 32) & 0xff));	/* CP_SYNC [31] | SRC_ADDR_HI [7:0] */
		radeon_emit(cs, dst_offset);			/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_offset >> 32) & 0xff);	/* DST_ADDR_HI [7:0] */
		radeon_emit(cs, byte_count);			/* COMMAND [29:22] | BYTE_COUNT [20:0] */
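		/* No COMMAND bits are set in the last dword, so this is a
		 * plain linear-to-linear copy and byte_count occupies the
		 * low 21 bits on its own. */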

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, src_reloc);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, dst_reloc);

		size -= byte_count;
		src_offset += byte_count;
		dst_offset += byte_count;
	}

	/* Invalidate the read caches. */
	rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_INV_VERTEX_CACHE |
			 R600_CONTEXT_INV_TEX_CACHE;
}

void r600_dma_copy_buffer(struct r600_context *rctx,
			  struct pipe_resource *dst,
			  struct pipe_resource *src,
			  uint64_t dst_offset,
			  uint64_t src_offset,
			  uint64_t size)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
	unsigned i, ncopy, csize;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);
>>= 2; /* convert to dwords */
465 ncopy
= (size
/ R600_DMA_COPY_MAX_SIZE_DW
) + !!(size
% R600_DMA_COPY_MAX_SIZE_DW
);
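	/* The second term rounds up, i.e.
	 * ncopy = ceil(size / R600_DMA_COPY_MAX_SIZE_DW). For example, if the
	 * per-packet maximum were 0xffff dwords, a 0x18000-dword copy would
	 * need one full packet plus one remainder packet. */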

	r600_need_dma_space(&rctx->b, ncopy * 5);
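	/* Each copy emits exactly 5 dwords below (one packet header plus four
	 * address/size dwords), hence ncopy * 5. */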
	for (i = 0; i < ncopy; i++) {
		csize = size < R600_DMA_COPY_MAX_SIZE_DW ? size : R600_DMA_COPY_MAX_SIZE_DW;
		/* emit reloc before writing cs so that cs is always in consistent state */
		r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rsrc, RADEON_USAGE_READ,
				      RADEON_PRIO_MIN);
		r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rdst, RADEON_USAGE_WRITE,
				      RADEON_PRIO_MIN);
		cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize);
		cs->buf[cs->cdw++] = dst_offset & 0xfffffffc;
		cs->buf[cs->cdw++] = src_offset & 0xfffffffc;
		cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
		cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
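		/* Each address is split into a 32-bit low part (dword-aligned,
		 * hence the 0xfffffffc mask) and an 8-bit high part, i.e. the
		 * DMA engine addresses a 40-bit GPU address space. */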
		dst_offset += csize << 2;
		src_offset += csize << 2;
		size -= csize;
	}
}