/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_hw_context_priv.h"
#include "r600d.h"
#include "util/u_memory.h"
#include <errno.h>
#include <unistd.h>
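
/* This file maintains the r600 hardware context: a shadow copy of context
 * register state organized into blocks, plus helpers that turn dirty state
 * into PM4 packets written into the winsys command stream (cs->buf/cs->cdw). */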
/* Get backends mask */
void r600_get_backend_mask(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;
	uint64_t va;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;
	va = r600_resource_va(&ctx->screen->screen, (void*)buffer);

	/* initialize buffer with zeroes */
	results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->ws->buffer_unmap(buffer->cs_buf);

		/* emit EVENT_WRITE for ZPASS_DONE */
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;

		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
		cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE);

		/* analyze results */
		results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1 << i);
			}
			ctx->ws->buffer_unmap(buffer->cs_buf);
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fallback to old method - set num_backends lower bits to 1 */
	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
	return;
}
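
/* A register block shadows a run of consecutive context registers.  Its PM4
 * image is laid out as:
 *
 *	PKT3(opcode, n, 0)		packet header
 *	(start_offset - base) >> 2	starting register index
 *	n register values		shadowed through block->reg[]
 *	[PKT3_NOP + 0] per reloc	patched with a reloc index at emit time
 *
 * Emitting a dirty block is then a single memcpy into the command stream. */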
static void r600_init_block(struct r600_context *ctx,
			    struct r600_block *block,
			    const struct r600_reg *reg, int index, int nreg,
			    unsigned opcode, unsigned offset_base)
{
	int i = index;
	int j, n = nreg;

	/* initialize block */
	block->status |= R600_BLOCK_STATUS_DIRTY; /* dirty all blocks at start */
	block->start_offset = reg[i].offset;
	block->pm4[block->pm4_ndwords++] = PKT3(opcode, n, 0);
	block->pm4[block->pm4_ndwords++] = (block->start_offset - offset_base) >> 2;
	block->reg = &block->pm4[block->pm4_ndwords];
	block->pm4_ndwords += n;
	block->nreg = n;
	block->nreg_dirty = n;
	LIST_INITHEAD(&block->list);
	LIST_INITHEAD(&block->enable_list);

	for (j = 0; j < n; j++) {
		if (reg[i+j].flags & REG_FLAG_DIRTY_ALWAYS) {
			block->flags |= REG_FLAG_DIRTY_ALWAYS;
		}
		if (reg[i+j].flags & REG_FLAG_ENABLE_ALWAYS) {
			if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
				block->status |= R600_BLOCK_STATUS_ENABLED;
				LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
				LIST_ADDTAIL(&block->list,&ctx->dirty);
			}
		}
		if (reg[i+j].flags & REG_FLAG_FLUSH_CHANGE) {
			block->flags |= REG_FLAG_FLUSH_CHANGE;
		}

		if (reg[i+j].flags & REG_FLAG_NEED_BO) {
			block->nbo++;
			assert(block->nbo < R600_BLOCK_MAX_BO);
			block->pm4_bo_index[j] = block->nbo;
			block->pm4[block->pm4_ndwords++] = PKT3(PKT3_NOP, 0, 0);
			block->pm4[block->pm4_ndwords++] = 0x00000000;
			block->reloc[block->nbo].bo_pm4_index = block->pm4_ndwords - 1;
		}
	}
	/* check that we stay in limit */
	assert(block->pm4_ndwords < R600_BLOCK_MAX_REG);
}
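
/* Block lookup is a two-level table: CTX_RANGE_ID() maps a register offset
 * to a range, and CTX_BLOCK_ID() indexes that range's block array.  Every
 * register belonging to a block maps back to the same block pointer. */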
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
			   unsigned opcode, unsigned offset_base)
{
	struct r600_block *block;
	struct r600_range *range;
	int offset;

	for (unsigned i = 0, n = 0; i < nreg; i += n) {
		/* ignore new block marker */
		if (reg[i].offset == GROUP_FORCE_NEW_BLOCK) {
			n = 1;
			continue;
		}

		/* registers that need relocation are in their own group */
		/* find number of consecutive registers */
		n = 0;
		offset = reg[i].offset;
		while (reg[i + n].offset == offset) {
			n++;
			offset += 4;
			if ((n + i) >= nreg)
				break;
			if (n >= (R600_BLOCK_MAX_REG - 2))
				break;
		}

		/* allocate new block */
		block = calloc(1, sizeof(struct r600_block));
		if (block == NULL) {
			return -ENOMEM;
		}
		ctx->nblocks++;
		for (int j = 0; j < n; j++) {
			range = &ctx->range[CTX_RANGE_ID(reg[i + j].offset)];
			/* create block table if it doesn't exist */
			if (!range->blocks)
				range->blocks = calloc(1 << HASH_SHIFT, sizeof(void *));
			if (!range->blocks) {
				free(block);
				return -ENOMEM;
			}

			range->blocks[CTX_BLOCK_ID(reg[i + j].offset)] = block;
		}

		r600_init_block(ctx, block, reg, i, n, opcode, offset_base);
	}
	return 0;
}
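
/* Register table consumed by r600_context_add_block().  Each entry is a
 * register offset plus REG_FLAG_* bits (the third initializer is unused by
 * the code in this file).  GROUP_FORCE_NEW_BLOCK pseudo-entries force a
 * block split so that registers needing a relocation (REG_FLAG_NEED_BO)
 * end up in their own block. */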
static const struct r600_reg r600_context_reg_list[] = {
	{R_028D24_DB_HTILE_SURFACE, 0, 0},
	{R_028614_SPI_VS_OUT_ID_0, 0, 0},
	{R_028618_SPI_VS_OUT_ID_1, 0, 0},
	{R_02861C_SPI_VS_OUT_ID_2, 0, 0},
	{R_028620_SPI_VS_OUT_ID_3, 0, 0},
	{R_028624_SPI_VS_OUT_ID_4, 0, 0},
	{R_028628_SPI_VS_OUT_ID_5, 0, 0},
	{R_02862C_SPI_VS_OUT_ID_6, 0, 0},
	{R_028630_SPI_VS_OUT_ID_7, 0, 0},
	{R_028634_SPI_VS_OUT_ID_8, 0, 0},
	{R_028638_SPI_VS_OUT_ID_9, 0, 0},
	{R_0286C4_SPI_VS_OUT_CONFIG, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028858_SQ_PGM_START_VS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028868_SQ_PGM_RESOURCES_VS, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0288A4_SQ_PGM_RESOURCES_FS, 0, 0},
	{R_0288DC_SQ_PGM_CF_OFFSET_FS, 0, 0},
	{R_028644_SPI_PS_INPUT_CNTL_0, 0, 0},
	{R_028648_SPI_PS_INPUT_CNTL_1, 0, 0},
	{R_02864C_SPI_PS_INPUT_CNTL_2, 0, 0},
	{R_028650_SPI_PS_INPUT_CNTL_3, 0, 0},
	{R_028654_SPI_PS_INPUT_CNTL_4, 0, 0},
	{R_028658_SPI_PS_INPUT_CNTL_5, 0, 0},
	{R_02865C_SPI_PS_INPUT_CNTL_6, 0, 0},
	{R_028660_SPI_PS_INPUT_CNTL_7, 0, 0},
	{R_028664_SPI_PS_INPUT_CNTL_8, 0, 0},
	{R_028668_SPI_PS_INPUT_CNTL_9, 0, 0},
	{R_02866C_SPI_PS_INPUT_CNTL_10, 0, 0},
	{R_028670_SPI_PS_INPUT_CNTL_11, 0, 0},
	{R_028674_SPI_PS_INPUT_CNTL_12, 0, 0},
	{R_028678_SPI_PS_INPUT_CNTL_13, 0, 0},
	{R_02867C_SPI_PS_INPUT_CNTL_14, 0, 0},
	{R_028680_SPI_PS_INPUT_CNTL_15, 0, 0},
	{R_028684_SPI_PS_INPUT_CNTL_16, 0, 0},
	{R_028688_SPI_PS_INPUT_CNTL_17, 0, 0},
	{R_02868C_SPI_PS_INPUT_CNTL_18, 0, 0},
	{R_028690_SPI_PS_INPUT_CNTL_19, 0, 0},
	{R_028694_SPI_PS_INPUT_CNTL_20, 0, 0},
	{R_028698_SPI_PS_INPUT_CNTL_21, 0, 0},
	{R_02869C_SPI_PS_INPUT_CNTL_22, 0, 0},
	{R_0286A0_SPI_PS_INPUT_CNTL_23, 0, 0},
	{R_0286A4_SPI_PS_INPUT_CNTL_24, 0, 0},
	{R_0286A8_SPI_PS_INPUT_CNTL_25, 0, 0},
	{R_0286AC_SPI_PS_INPUT_CNTL_26, 0, 0},
	{R_0286B0_SPI_PS_INPUT_CNTL_27, 0, 0},
	{R_0286B4_SPI_PS_INPUT_CNTL_28, 0, 0},
	{R_0286B8_SPI_PS_INPUT_CNTL_29, 0, 0},
	{R_0286BC_SPI_PS_INPUT_CNTL_30, 0, 0},
	{R_0286C0_SPI_PS_INPUT_CNTL_31, 0, 0},
	{R_0286CC_SPI_PS_IN_CONTROL_0, 0, 0},
	{R_0286D0_SPI_PS_IN_CONTROL_1, 0, 0},
	{R_0286D8_SPI_INPUT_Z, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028840_SQ_PGM_START_PS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028850_SQ_PGM_RESOURCES_PS, 0, 0},
	{R_028854_SQ_PGM_EXPORTS_PS, 0, 0},
};
void r600_context_fini(struct r600_context *ctx)
{
	struct r600_block *block;
	struct r600_range *range;

	for (int i = 0; i < NUM_RANGES; i++) {
		if (!ctx->range[i].blocks)
			continue;
		for (int j = 0; j < (1 << HASH_SHIFT); j++) {
			block = ctx->range[i].blocks[j];
			if (block) {
				for (int k = 0, offset = block->start_offset; k < block->nreg; k++, offset += 4) {
					range = &ctx->range[CTX_RANGE_ID(offset)];
					range->blocks[CTX_BLOCK_ID(offset)] = NULL;
				}
				for (int k = 1; k <= block->nbo; k++) {
					pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
				}
				free(block);
			}
		}
		free(ctx->range[i].blocks);
	}
	free(ctx->blocks);
}
int r600_setup_block_table(struct r600_context *ctx)
{
	/* setup block table */
	int c = 0;
	ctx->blocks = calloc(ctx->nblocks, sizeof(void*));
	if (!ctx->blocks)
		return -ENOMEM;
	for (int i = 0; i < NUM_RANGES; i++) {
		if (!ctx->range[i].blocks)
			continue;
		for (int j = 0, add; j < (1 << HASH_SHIFT); j++) {
			if (!ctx->range[i].blocks[j])
				continue;

			add = 1;
			for (int k = 0; k < c; k++) {
				if (ctx->blocks[k] == ctx->range[i].blocks[j]) {
					add = 0;
					break;
				}
			}
			if (add) {
				assert(c < ctx->nblocks);
				ctx->blocks[c++] = ctx->range[i].blocks[j];
				j += (ctx->range[i].blocks[j]->nreg) - 1;
			}
		}
	}
	return 0;
}
int r600_context_init(struct r600_context *ctx)
{
	int r;

	r = r600_context_add_block(ctx, r600_context_reg_list,
				   Elements(r600_context_reg_list), PKT3_SET_CONTEXT_REG, R600_CONTEXT_REG_OFFSET);
	if (r)
		goto out_err;

	r = r600_setup_block_table(ctx);
	if (r)
		goto out_err;

	ctx->max_db = 4;
	return 0;
out_err:
	r600_context_fini(ctx);
	return r;
}
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
			boolean count_draw_in)
{
	if (!ctx->ws->cs_memory_below_limit(ctx->rings.gfx.cs, ctx->vram, ctx->gtt)) {
		ctx->gtt = 0;
		ctx->vram = 0;
		ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
		return;
	}
	/* all memory usage will be accounted again once relocations are emitted */
	ctx->gtt = 0;
	ctx->vram = 0;

	/* The number of dwords we already used in the CS so far. */
	num_dw += ctx->rings.gfx.cs->cdw;

	if (count_draw_in) {
		unsigned i;

		/* The number of dwords all the dirty states would take. */
		for (i = 0; i < R600_NUM_ATOMS; i++) {
			if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
				num_dw += ctx->atoms[i]->num_dw;
				if (ctx->screen->trace_bo) {
					num_dw += R600_TRACE_CS_DWORDS;
				}
			}
		}

		num_dw += ctx->pm4_dirty_cdwords;

		/* The upper-bound of how much space a draw command would take. */
		num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
		if (ctx->screen->trace_bo) {
			num_dw += R600_TRACE_CS_DWORDS;
		}
	}

	/* Count in queries_suspend. */
	num_dw += ctx->num_cs_dw_nontimer_queries_suspend;

	/* Count in streamout_end at the end of CS. */
	if (ctx->streamout.begin_emitted) {
		num_dw += ctx->streamout.num_dw_for_end;
	}

	/* Count in render_condition(NULL) at the end of CS. */
	if (ctx->predicate_drawing) {
		num_dw += 3;
	}

	/* SX_MISC */
	if (ctx->chip_class <= R700) {
		num_dw += 3;
	}

	/* Count in framebuffer cache flushes at the end of CS. */
	num_dw += R600_MAX_FLUSH_CS_DWORDS;

	/* The fence at the end of CS. */
	num_dw += 10;

	/* Flush if there's not enough space. */
	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
		ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
	}
}
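
/* Typical caller pattern (mirrors r600_context_emit_fence() below): reserve
 * the space first, then hand-roll the packets:
 *
 *	r600_need_cs_space(ctx, 10, FALSE);
 *	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
 *	...
 */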
void r600_context_dirty_block(struct r600_context *ctx,
			      struct r600_block *block,
			      int dirty, int index)
{
	if ((index + 1) > block->nreg_dirty)
		block->nreg_dirty = index + 1;

	if ((dirty != (block->status & R600_BLOCK_STATUS_DIRTY)) || !(block->status & R600_BLOCK_STATUS_ENABLED)) {
		block->status |= R600_BLOCK_STATUS_DIRTY;
		ctx->pm4_dirty_cdwords += block->pm4_ndwords;
		if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
			block->status |= R600_BLOCK_STATUS_ENABLED;
			LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
		}
		LIST_ADDTAIL(&block->list,&ctx->dirty);

		if (block->flags & REG_FLAG_FLUSH_CHANGE) {
			ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE;
		}
	}
}
/**
 * If reg needs a reloc, this function will add it to its block's reloc list.
 * @return true if reg needs a reloc, false otherwise
 */
static bool r600_reg_set_block_reloc(struct r600_pipe_reg *reg)
{
	unsigned reloc_id;

	if (!reg->block->pm4_bo_index[reg->id]) {
		return false;
	}

	/* find relocation */
	reloc_id = reg->block->pm4_bo_index[reg->id];
	pipe_resource_reference(
		(struct pipe_resource**)&reg->block->reloc[reloc_id].bo,
		&reg->bo->b.b);
	reg->block->reloc[reloc_id].bo_usage = reg->bo_usage;
	return true;
}
/**
 * This function will emit all the registers in state directly to the command
 * stream, allowing you to bypass the r600_context dirty list.
 *
 * This is used for dispatching compute shaders to avoid mixing compute and
 * 3D states in the context's dirty list.
 *
 * @param pkt_flags Should be either 0 or RADEON_CP_PACKET3_COMPUTE_MODE.  This
 * value will be passed on to r600_context_block_emit_dirty and or'd against
 * the PKT3 packet headers.
 */
void r600_context_pipe_state_emit(struct r600_context *ctx,
			  struct r600_pipe_state *state,
			  unsigned pkt_flags)
{
	unsigned i;

	/* Mark all blocks as dirty:
	 * Since two registers can be in the same block, we need to make sure
	 * we mark all the blocks dirty before we emit any of them.  If we were
	 * to mark blocks dirty and emit them in the same loop, like this:
	 *
	 * foreach (reg in state->regs) {
	 *     mark_dirty(reg->block)
	 *     emit_block(reg->block)
	 * }
	 *
	 * then if we have two registers in this state that are in the same
	 * block, we would end up emitting that block twice.
	 */
	for (i = 0; i < state->nregs; i++) {
		struct r600_pipe_reg *reg = &state->regs[i];
		/* Mark all the registers in the block as dirty */
		reg->block->nreg_dirty = reg->block->nreg;
		reg->block->status |= R600_BLOCK_STATUS_DIRTY;
		/* Update the reloc for this register if necessary. */
		r600_reg_set_block_reloc(reg);
	}

	/* Emit the register writes */
	for (i = 0; i < state->nregs; i++) {
		struct r600_pipe_reg *reg = &state->regs[i];
		if (reg->block->status & R600_BLOCK_STATUS_DIRTY) {
			r600_context_block_emit_dirty(ctx, reg->block, pkt_flags);
		}
	}
}
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state)
{
	struct r600_block *block;
	int dirty;
	for (int i = 0; i < state->nregs; i++) {
		unsigned id;
		struct r600_pipe_reg *reg = &state->regs[i];

		block = reg->block;
		id = reg->id;

		dirty = block->status & R600_BLOCK_STATUS_DIRTY;

		if (reg->value != block->reg[id]) {
			block->reg[id] = reg->value;
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}
		if (block->flags & REG_FLAG_DIRTY_ALWAYS)
			dirty |= R600_BLOCK_STATUS_DIRTY;
		if (r600_reg_set_block_reloc(reg)) {
			/* always force dirty for relocs for now */
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}

		if (dirty)
			r600_context_dirty_block(ctx, block, dirty, id);
	}
}
/**
 * @param pkt_flags should be set to RADEON_CP_PACKET3_COMPUTE_MODE if this
 * block will be used for compute shaders.
 */
void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block,
	unsigned pkt_flags)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	int optional = block->nbo == 0 && !(block->flags & REG_FLAG_DIRTY_ALWAYS);
	int cp_dwords = block->pm4_ndwords, start_dword = 0;
	int new_dwords = 0;
	int nbo = block->nbo;

	if (block->nreg_dirty == 0 && optional) {
		goto out;
	}

	if (nbo) {
		for (int j = 0; j < block->nreg; j++) {
			if (block->pm4_bo_index[j]) {
				/* find relocation */
				struct r600_block_reloc *reloc = &block->reloc[block->pm4_bo_index[j]];
				if (reloc->bo) {
					block->pm4[reloc->bo_pm4_index] =
						r600_context_bo_reloc(ctx, &ctx->rings.gfx, reloc->bo, reloc->bo_usage);
				} else {
					block->pm4[reloc->bo_pm4_index] = 0;
				}
				nbo--;
				if (nbo == 0)
					break;
			}
		}
	}

	optional &= (block->nreg_dirty != block->nreg);
	if (optional) {
		new_dwords = block->nreg_dirty;
		start_dword = cs->cdw;
		cp_dwords = new_dwords + 2;
	}
	memcpy(&cs->buf[cs->cdw], block->pm4, cp_dwords * 4);

	/* We are applying the pkt_flags after copying the register block to
	 * the command stream, because it is possible this block will be
	 * emitted with a different pkt_flags, and we don't want to store the
	 * pkt_flags in the block.
	 */
	cs->buf[cs->cdw] |= pkt_flags;
	cs->cdw += cp_dwords;

	if (optional) {
		uint32_t newword;

		newword = cs->buf[start_dword];
		newword &= PKT_COUNT_C;
		newword |= PKT_COUNT_S(new_dwords);
		cs->buf[start_dword] = newword;
	}
out:
	block->status ^= R600_BLOCK_STATUS_DIRTY;
	block->nreg_dirty = 0;
	LIST_DELINIT(&block->list);
}
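
/* Note on the "optional" path above: a block with no BO and without
 * REG_FLAG_DIRTY_ALWAYS may be emitted truncated to its first nreg_dirty
 * registers; the copied PKT3 header's count field is then patched in place
 * with PKT_COUNT_C/PKT_COUNT_S so it matches the shortened payload. */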
void r600_flush_emit(struct r600_context *rctx)
{
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
	unsigned cp_coher_cntl = 0;
	unsigned wait_until = 0;
	unsigned emit_flush = 0;

	if (!rctx->flags) {
		return;
	}

	if (rctx->flags & R600_CONTEXT_WAIT_3D_IDLE) {
		wait_until |= S_008040_WAIT_3D_IDLE(1);
	}
	if (rctx->flags & R600_CONTEXT_WAIT_CP_DMA_IDLE) {
		wait_until |= S_008040_WAIT_CP_DMA_IDLE(1);
	}

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->family >= CHIP_CAYMAN) {
			/* emit a PS partial flush on Cayman/TN */
			rctx->flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		}
	}

	if (rctx->flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
	}

	if (rctx->chip_class >= R700 &&
	    (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0);
	}

	if (rctx->chip_class >= R700 &&
	    (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0);
	}

	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
		if (rctx->chip_class >= EVERGREEN) {
			cp_coher_cntl = S_0085F0_CB0_DEST_BASE_ENA(1) |
					S_0085F0_CB1_DEST_BASE_ENA(1) |
					S_0085F0_CB2_DEST_BASE_ENA(1) |
					S_0085F0_CB3_DEST_BASE_ENA(1) |
					S_0085F0_CB4_DEST_BASE_ENA(1) |
					S_0085F0_CB5_DEST_BASE_ENA(1) |
					S_0085F0_CB6_DEST_BASE_ENA(1) |
					S_0085F0_CB7_DEST_BASE_ENA(1) |
					S_0085F0_CB8_DEST_BASE_ENA(1) |
					S_0085F0_CB9_DEST_BASE_ENA(1) |
					S_0085F0_CB10_DEST_BASE_ENA(1) |
					S_0085F0_CB11_DEST_BASE_ENA(1) |
					S_0085F0_DB_DEST_BASE_ENA(1) |
					S_0085F0_TC_ACTION_ENA(1) |
					S_0085F0_CB_ACTION_ENA(1) |
					S_0085F0_DB_ACTION_ENA(1) |
					S_0085F0_SH_ACTION_ENA(1) |
					S_0085F0_SMX_ACTION_ENA(1) |
					S_0085F0_FULL_CACHE_ENA(1);
		} else {
			cp_coher_cntl = S_0085F0_SMX_ACTION_ENA(1) |
					S_0085F0_SH_ACTION_ENA(1) |
					S_0085F0_VC_ACTION_ENA(1) |
					S_0085F0_TC_ACTION_ENA(1) |
					S_0085F0_FULL_CACHE_ENA(1);
		}
		emit_flush = 1;
	}

	if (rctx->flags & R600_CONTEXT_INVAL_READ_CACHES) {
		cp_coher_cntl |= S_0085F0_VC_ACTION_ENA(1) |
				S_0085F0_TC_ACTION_ENA(1) |
				S_0085F0_FULL_CACHE_ENA(1);
		emit_flush = 1;
	}

	if (rctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) |
				S_0085F0_SO1_DEST_BASE_ENA(1) |
				S_0085F0_SO2_DEST_BASE_ENA(1) |
				S_0085F0_SO3_DEST_BASE_ENA(1) |
				S_0085F0_SMX_ACTION_ENA(1);
		emit_flush = 1;
	}

	if (emit_flush) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
		cs->buf[cs->cdw++] = cp_coher_cntl;	/* CP_COHER_CNTL */
		cs->buf[cs->cdw++] = 0xffffffff;	/* CP_COHER_SIZE */
		cs->buf[cs->cdw++] = 0;			/* CP_COHER_BASE */
		cs->buf[cs->cdw++] = 0x0000000A;	/* POLL_INTERVAL */
	}

	if (wait_until) {
		/* Use of WAIT_UNTIL is deprecated on Cayman+ */
		if (rctx->family < CHIP_CAYMAN) {
			/* wait for things to settle */
			r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
		}
	}

	/* everything is properly flushed */
	rctx->flags = 0;
}
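
/* CS lifecycle: r600_context_flush() below suspends queries and streamout,
 * emits the final cache flushes and hands the CS to the winsys;
 * r600_begin_new_cs() then marks every atom, state and register block dirty
 * again so the next CS re-emits a self-contained state snapshot. */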
void r600_context_flush(struct r600_context *ctx, unsigned flags)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	if (cs->cdw == ctx->start_cs_cmd.num_dw)
		return;

	ctx->nontimer_queries_suspended = false;
	ctx->streamout.suspended = false;

	/* suspend queries */
	if (ctx->num_cs_dw_nontimer_queries_suspend) {
		r600_suspend_nontimer_queries(ctx);
		ctx->nontimer_queries_suspended = true;
	}

	if (ctx->streamout.begin_emitted) {
		r600_emit_streamout_end(ctx);
		ctx->streamout.suspended = true;
	}

	/* flush is needed to avoid lockups on some chips with user fences;
	 * this will also flush the framebuffer cache
	 */
	ctx->flags |= R600_CONTEXT_FLUSH_AND_INV |
		      R600_CONTEXT_FLUSH_AND_INV_CB_META |
		      R600_CONTEXT_FLUSH_AND_INV_DB_META |
		      R600_CONTEXT_WAIT_3D_IDLE |
		      R600_CONTEXT_WAIT_CP_DMA_IDLE;

	r600_flush_emit(ctx);

	/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
	if (ctx->chip_class <= R700) {
		r600_write_context_reg(cs, R_028350_SX_MISC, 0);
	}

	/* force to keep tiling flags */
	if (ctx->keep_tiling_flags) {
		flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
	}

	/* Flush the CS. */
	if (ctx->screen->trace_bo) {
		struct r600_screen *rscreen = ctx->screen;
		unsigned i;

		for (i = 0; i < cs->cdw; i++) {
			fprintf(stderr, "[%4d] [%5d] 0x%08x\n", rscreen->cs_count, i, cs->buf[i]);
		}
		rscreen->cs_count++;
	}
	ctx->ws->cs_flush(ctx->rings.gfx.cs, flags);

	if (ctx->screen->trace_bo) {
		struct r600_screen *rscreen = ctx->screen;
		unsigned i;

		for (i = 0; i < 10; i++) {
			usleep(5);
			if (!ctx->ws->buffer_is_busy(rscreen->trace_bo->buf, RADEON_USAGE_READWRITE)) {
				break;
			}
		}
		if (i == 10) {
			fprintf(stderr, "timeout on cs lockup likely happen at cs %d dw %d\n",
				rscreen->trace_ptr[1], rscreen->trace_ptr[0]);
		} else {
			fprintf(stderr, "cs %d executed in %dms\n", rscreen->trace_ptr[1], i * 5);
		}
	}
}
void r600_begin_new_cs(struct r600_context *ctx)
{
	struct r600_block *enable_block = NULL;
	unsigned shader;

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;
	ctx->gtt = 0;
	ctx->vram = 0;

	/* Begin a new CS. */
	r600_emit_command_buffer(ctx->rings.gfx.cs, &ctx->start_cs_cmd);

	/* Re-emit states. */
	ctx->alphatest_state.atom.dirty = true;
	ctx->blend_color.atom.dirty = true;
	ctx->cb_misc_state.atom.dirty = true;
	ctx->clip_misc_state.atom.dirty = true;
	ctx->clip_state.atom.dirty = true;
	ctx->db_misc_state.atom.dirty = true;
	ctx->db_state.atom.dirty = true;
	ctx->framebuffer.atom.dirty = true;
	ctx->poly_offset_state.atom.dirty = true;
	ctx->vgt_state.atom.dirty = true;
	ctx->sample_mask.atom.dirty = true;
	ctx->scissor.atom.dirty = true;
	ctx->config_state.atom.dirty = true;
	ctx->stencil_ref.atom.dirty = true;
	ctx->vertex_fetch_shader.atom.dirty = true;
	ctx->viewport.atom.dirty = true;

	if (ctx->blend_state.cso)
		ctx->blend_state.atom.dirty = true;
	if (ctx->dsa_state.cso)
		ctx->dsa_state.atom.dirty = true;
	if (ctx->rasterizer_state.cso)
		ctx->rasterizer_state.atom.dirty = true;

	if (ctx->chip_class <= R700) {
		ctx->seamless_cube_map.atom.dirty = true;
	}

	ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
	r600_vertex_buffers_dirty(ctx);

	/* Re-emit shader resources. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
		struct r600_textures_info *samplers = &ctx->samplers[shader];

		constbuf->dirty_mask = constbuf->enabled_mask;
		samplers->views.dirty_mask = samplers->views.enabled_mask;
		samplers->states.dirty_mask = samplers->states.enabled_mask;

		r600_constant_buffers_dirty(ctx, constbuf);
		r600_sampler_views_dirty(ctx, &samplers->views);
		r600_sampler_states_dirty(ctx, &samplers->states);
	}

	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		r600_streamout_buffers_dirty(ctx);
	}

	/* resume queries */
	if (ctx->nontimer_queries_suspended) {
		r600_resume_nontimer_queries(ctx);
	}

	/* set all valid groups as dirty so they get re-emitted on
	 * the next draw command
	 */
	LIST_FOR_EACH_ENTRY(enable_block, &ctx->enable_list, enable_list) {
		if(!(enable_block->status & R600_BLOCK_STATUS_DIRTY)) {
			LIST_ADDTAIL(&enable_block->list,&ctx->dirty);
			enable_block->status |= R600_BLOCK_STATUS_DIRTY;
		}
		ctx->pm4_dirty_cdwords += enable_block->pm4_ndwords;
		enable_block->nreg_dirty = enable_block->nreg;
	}

	/* Re-emit the draw state. */
	ctx->last_primitive_type = -1;
	ctx->last_start_instance = -1;
}
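
/* Fence emission: EVENT_WRITE_EOP makes the CP write `value` into fence_bo
 * at the given dword offset once all prior work has drained from the
 * pipeline; DATA_SEL=1 selects a 32-bit data write and INT_EN=0 means no
 * interrupt is raised (see the DATA_SEL | INT_EN dword below). */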
void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint64_t va;

	r600_need_cs_space(ctx, 10, FALSE);

	va = r600_resource_va(&ctx->screen->screen, (void*)fence_bo);
	va = va + (offset << 2);

	/* Use of WAIT_UNTIL is deprecated on Cayman+ */
	if (ctx->family >= CHIP_CAYMAN) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
	} else {
		r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
	}

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
	cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;	/* ADDRESS_LO */
	/* DATA_SEL | INT_EN | ADDRESS_HI */
	cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
	cs->buf[cs->cdw++] = value;	/* DATA_LO */
	cs->buf[cs->cdw++] = 0;		/* DATA_HI */
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, fence_bo, RADEON_USAGE_WRITE);
}
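
/* Streamout: before (re)programming buffer state, the VGT streamout engine
 * must be flushed.  The WAIT_REG_MEM packet below busy-waits until the
 * OFFSET_UPDATE_DONE bit of CP_STRMOUT_CNTL is set by the flush event. */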
static void r600_flush_vgt_streamout(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	r600_write_config_reg(cs, R_008490_CP_STRMOUT_CNTL, 0);

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0);

	cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0);
	cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */
	cs->buf[cs->cdw++] = R_008490_CP_STRMOUT_CNTL >> 2; /* register */
	cs->buf[cs->cdw++] = 0;
	cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* reference value */
	cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* mask */
	cs->buf[cs->cdw++] = 4; /* poll interval */
}
static void r600_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	if (buffer_enable_bit) {
		r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(1));
		r600_write_context_reg(cs, R_028B20_VGT_STRMOUT_BUFFER_EN, buffer_enable_bit);
	} else {
		r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(0));
	}
}
void r600_emit_streamout_begin(struct r600_context *ctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_so_target **t = ctx->streamout.targets;
	unsigned *stride_in_dw = ctx->vs_shader->so.stride;
	unsigned i, update_flags = 0;
	uint64_t va;

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_flush_vgt_streamout(ctx);
		evergreen_set_streamout_enable(ctx, ctx->streamout.enabled_mask);
	} else {
		r600_flush_vgt_streamout(ctx);
		r600_set_streamout_enable(ctx, ctx->streamout.enabled_mask);
	}

	for (i = 0; i < ctx->streamout.num_targets; i++) {
		if (t[i]) {
			t[i]->stride_in_dw = stride_in_dw[i];

			va = r600_resource_va(&ctx->screen->screen,
					      (void*)t[i]->b.buffer);

			update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);

			r600_write_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 3);
			r600_write_value(cs, (t[i]->b.buffer_offset +
					      t[i]->b.buffer_size) >> 2);	/* BUFFER_SIZE (in DW) */
			r600_write_value(cs, stride_in_dw[i]);			/* VTX_STRIDE (in DW) */
			r600_write_value(cs, va >> 8);				/* BUFFER_BASE */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] =
				r600_context_bo_reloc(ctx, &ctx->rings.gfx, r600_resource(t[i]->b.buffer),
						      RADEON_USAGE_WRITE);

			/* R7xx requires this packet after updating BUFFER_BASE.
			 * Without this, R7xx locks up. */
			if (ctx->family >= CHIP_RS780 && ctx->family <= CHIP_RV740) {
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BASE_UPDATE, 1, 0);
				cs->buf[cs->cdw++] = i;
				cs->buf[cs->cdw++] = va >> 8;

				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] =
					r600_context_bo_reloc(ctx, &ctx->rings.gfx, r600_resource(t[i]->b.buffer),
							      RADEON_USAGE_WRITE);
			}

			if (ctx->streamout.append_bitmask & (1 << i)) {
				va = r600_resource_va(&ctx->screen->screen,
						      (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset;
				/* Append. */
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
				cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
						     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM); /* control */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;		/* src address lo */
				cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL;	/* src address hi */

				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] =
					r600_context_bo_reloc(ctx, &ctx->rings.gfx, t[i]->buf_filled_size,
							      RADEON_USAGE_READ);
			} else {
				/* Start from the beginning. */
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
				cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
						     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET); /* control */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = t[i]->b.buffer_offset >> 2; /* buffer offset in DW */
				cs->buf[cs->cdw++] = 0; /* unused */
			}
		}
	}

	if (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
		cs->buf[cs->cdw++] = update_flags;
	}
	ctx->streamout.begin_emitted = true;
}
void r600_emit_streamout_end(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_so_target **t = ctx->streamout.targets;
	unsigned i;
	uint64_t va;

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_flush_vgt_streamout(ctx);
	} else {
		r600_flush_vgt_streamout(ctx);
	}

	for (i = 0; i < ctx->streamout.num_targets; i++) {
		if (t[i]) {
			va = r600_resource_va(&ctx->screen->screen,
					      (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset;
			cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
			cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
					     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
					     STRMOUT_STORE_BUFFER_FILLED_SIZE;	/* control */
			cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;		/* dst address lo */
			cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL;	/* dst address hi */
			cs->buf[cs->cdw++] = 0; /* unused */
			cs->buf[cs->cdw++] = 0; /* unused */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] =
				r600_context_bo_reloc(ctx, &ctx->rings.gfx, t[i]->buf_filled_size,
						      RADEON_USAGE_WRITE);
		}
	}

	if (ctx->chip_class >= EVERGREEN) {
		ctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
		evergreen_set_streamout_enable(ctx, 0);
	} else {
		if (ctx->chip_class >= R700) {
			ctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
		}
		r600_set_streamout_enable(ctx, 0);
	}
	ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
	ctx->streamout.begin_emitted = false;
}
/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
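
/* The CP DMA byte-count field is 21 bits wide; backing off by 8 bytes from
 * the 2 MiB limit presumably keeps each chunk's size 8-byte aligned (an
 * assumption; the hard limit itself is the 21-bit field). */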
void r600_cp_dma_copy_buffer(struct r600_context *rctx,
			     struct pipe_resource *dst, uint64_t dst_offset,
			     struct pipe_resource *src, uint64_t src_offset,
			     unsigned size)
{
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;

	assert(size);
	assert(rctx->chip_class != R600);

	/* CP DMA doesn't work on R600 (flushing seems to be unreliable). */
	if (rctx->chip_class == R600) {
		return;
	}

	dst_offset += r600_resource_va(&rctx->screen->screen, dst);
	src_offset += r600_resource_va(&rctx->screen->screen, src);

	/* We flush the caches, because we might read from or write
	 * to resources which are bound right now. */
	rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES |
		       R600_CONTEXT_FLUSH_AND_INV |
		       R600_CONTEXT_FLUSH_AND_INV_CB_META |
		       R600_CONTEXT_FLUSH_AND_INV_DB_META |
		       R600_CONTEXT_STREAMOUT_FLUSH |
		       R600_CONTEXT_WAIT_3D_IDLE;

	/* There are differences between R700 and EG in CP DMA,
	 * but we only use the common bits here. */
	while (size) {
		unsigned sync = 0;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned src_reloc, dst_reloc;

		r600_need_cs_space(rctx, 10 + (rctx->flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);

		/* Flush the caches for the first copy only. */
		if (rctx->flags) {
			r600_flush_emit(rctx);
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count) {
			sync = PKT3_CP_DMA_CP_SYNC;
		}

		/* This must be done after r600_need_cs_space. */
		src_reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
		dst_reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);

		r600_write_value(cs, PKT3(PKT3_CP_DMA, 4, 0));
		r600_write_value(cs, src_offset);	/* SRC_ADDR_LO [31:0] */
		r600_write_value(cs, sync | ((src_offset >> 32) & 0xff));	/* CP_SYNC [31] | SRC_ADDR_HI [7:0] */
		r600_write_value(cs, dst_offset);	/* DST_ADDR_LO [31:0] */
		r600_write_value(cs, (dst_offset >> 32) & 0xff);		/* DST_ADDR_HI [7:0] */
		r600_write_value(cs, byte_count);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
		r600_write_value(cs, src_reloc);
		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
		r600_write_value(cs, dst_reloc);

		size -= byte_count;
		src_offset += byte_count;
		dst_offset += byte_count;
	}

	/* Invalidate the read caches. */
	rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
}
void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw)
{
	/* The number of dwords we already used in the DMA CS so far. */
	num_dw += ctx->rings.dma.cs->cdw;
	/* Flush if there's not enough space. */
	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
		ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
	}
}
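
/* The async DMA COPY packet moves at most 0xffff dwords per packet, hence
 * the split of the (dword-sized) copy into ncopy chunks below. */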
void r600_dma_copy(struct r600_context *rctx,
		struct pipe_resource *dst,
		struct pipe_resource *src,
		uint64_t dst_offset,
		uint64_t src_offset,
		uint64_t size)
{
	struct radeon_winsys_cs *cs = rctx->rings.dma.cs;
	unsigned i, ncopy, csize, shift;
	struct r600_resource *rdst = (struct r600_resource*)dst;
	struct r600_resource *rsrc = (struct r600_resource*)src;

	/* flush the gfx ring so that the dma ring is the only one active */
	rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);

	size >>= 2;
	shift = 2;
	ncopy = (size / 0xffff) + !!(size % 0xffff);

	r600_need_dma_space(rctx, ncopy * 5);
	for (i = 0; i < ncopy; i++) {
		csize = size < 0xffff ? size : 0xffff;
		/* emit reloc before writing cs so that cs is always in consistent state */
		r600_context_bo_reloc(rctx, &rctx->rings.dma, rsrc, RADEON_USAGE_READ);
		r600_context_bo_reloc(rctx, &rctx->rings.dma, rdst, RADEON_USAGE_WRITE);
		cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize);
		cs->buf[cs->cdw++] = dst_offset & 0xfffffffc;
		cs->buf[cs->cdw++] = src_offset & 0xfffffffc;
		cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
		cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
		dst_offset += csize << shift;
		src_offset += csize << shift;
		size -= csize;
	}
}