r600g: add multi ring support with dma as first second ring v4
[mesa.git] / src / gallium / drivers / r600 / r600_hw_context.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#include "r600_hw_context_priv.h"
#include "r600d.h"
#include "util/u_memory.h"
#include <errno.h>
#include <unistd.h>

/* Get backends mask */
void r600_get_backend_mask(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned num_backends = ctx->screen->info.r600_num_backends;
	unsigned i, mask = 0;
	uint64_t va;

	/* if backend_map query is supported by the kernel */
	if (ctx->screen->info.r600_backend_map_valid) {
		unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
		unsigned backend_map = ctx->screen->info.r600_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

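		/* Each tile pipe contributes one item_width-bit field to
		 * backend_map, and the low item_mask bits of that field hold
		 * the backend id used by that pipe.  Illustrative example
		 * (not real hardware data): on Evergreen, backend_map =
		 * 0x3210 with four tile pipes decodes to backends 0,1,2,3,
		 * i.e. mask = 0xF.
		 */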
		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1 << i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			ctx->backend_mask = mask;
			return;
		}
	}

	/* otherwise, fall back to a path that works on older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, ctx->max_db*16);
	if (!buffer)
		goto err;
	va = r600_resource_va(&ctx->screen->screen, (void*)buffer);

	/* initialize buffer with zeroes */
	results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, ctx->max_db * 4 * 4);
		ctx->ws->buffer_unmap(buffer->cs_buf);

		/* emit EVENT_WRITE for ZPASS_DONE */
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;

		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
		cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE);

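		/* The read mapping below goes through
		 * r600_buffer_mmap_sync_with_rings which, as the name
		 * suggests, synchronizes with the ring, so the ZPASS_DONE
		 * results should be in memory by the time we read them.
		 */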
		/* analyze results */
		results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < ctx->max_db; i++) {
				/* at least the highest bit will be set if the backend is used */
				if (results[i*4 + 1])
					mask |= (1 << i);
			}
			ctx->ws->buffer_unmap(buffer->cs_buf);
		}
	}

	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

	if (mask != 0) {
		ctx->backend_mask = mask;
		return;
	}

err:
	/* fall back to the old method: set the num_backends lowest bits to 1 */
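	/* e.g. num_backends == 4 yields backend_mask == 0xF (bits 0-3 set) */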
	ctx->backend_mask = (~((uint32_t)0)) >> (32 - num_backends);
	return;
}

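/* Build the PM4 template for a register block: a packet header with the
 * given opcode, the register offset, one value slot per register, and, for
 * each register that needs a relocation, a trailing NOP packet whose payload
 * is patched with the relocation at emit time.
 */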
static void r600_init_block(struct r600_context *ctx,
			    struct r600_block *block,
			    const struct r600_reg *reg, int index, int nreg,
			    unsigned opcode, unsigned offset_base)
{
	int i = index;
	int j, n = nreg;

	/* initialize block */
	block->flags = 0;
	block->status |= R600_BLOCK_STATUS_DIRTY; /* dirty all blocks at start */
	block->start_offset = reg[i].offset;
	block->pm4[block->pm4_ndwords++] = PKT3(opcode, n, 0);
	block->pm4[block->pm4_ndwords++] = (block->start_offset - offset_base) >> 2;
	block->reg = &block->pm4[block->pm4_ndwords];
	block->pm4_ndwords += n;
	block->nreg = n;
	block->nreg_dirty = n;
	LIST_INITHEAD(&block->list);
	LIST_INITHEAD(&block->enable_list);

	for (j = 0; j < n; j++) {
		if (reg[i+j].flags & REG_FLAG_DIRTY_ALWAYS) {
			block->flags |= REG_FLAG_DIRTY_ALWAYS;
		}
		if (reg[i+j].flags & REG_FLAG_ENABLE_ALWAYS) {
			if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
				block->status |= R600_BLOCK_STATUS_ENABLED;
				LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
				LIST_ADDTAIL(&block->list, &ctx->dirty);
			}
		}
		if (reg[i+j].flags & REG_FLAG_FLUSH_CHANGE) {
			block->flags |= REG_FLAG_FLUSH_CHANGE;
		}

		if (reg[i+j].flags & REG_FLAG_NEED_BO) {
			block->nbo++;
			assert(block->nbo < R600_BLOCK_MAX_BO);
			block->pm4_bo_index[j] = block->nbo;
			block->pm4[block->pm4_ndwords++] = PKT3(PKT3_NOP, 0, 0);
			block->pm4[block->pm4_ndwords++] = 0x00000000;
			block->reloc[block->nbo].bo_pm4_index = block->pm4_ndwords - 1;
		}
	}
	/* check that we stay within the limit */
	assert(block->pm4_ndwords < R600_BLOCK_MAX_REG);
}

int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
			   unsigned opcode, unsigned offset_base)
{
	struct r600_block *block;
	struct r600_range *range;
	int offset;

	for (unsigned i = 0, n = 0; i < nreg; i += n) {
		/* skip the new-block marker */
		if (reg[i].offset == GROUP_FORCE_NEW_BLOCK) {
			n = 1;
			continue;
		}

		/* registers that need relocation are in their own group */
		/* find number of consecutive registers */
		n = 0;
		offset = reg[i].offset;
		while (reg[i + n].offset == offset) {
			n++;
			offset += 4;
			if ((n + i) >= nreg)
				break;
			if (n >= (R600_BLOCK_MAX_REG - 2))
				break;
		}
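		/* e.g. the ten consecutive SPI_VS_OUT_ID_* registers in
		 * r600_context_reg_list below are grouped into a single
		 * block with n == 10.
		 */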

		/* allocate new block */
		block = calloc(1, sizeof(struct r600_block));
		if (block == NULL) {
			return -ENOMEM;
		}
		ctx->nblocks++;
		for (int j = 0; j < n; j++) {
			range = &ctx->range[CTX_RANGE_ID(reg[i + j].offset)];
			/* create block table if it doesn't exist */
			if (!range->blocks)
				range->blocks = calloc(1 << HASH_SHIFT, sizeof(void *));
			if (!range->blocks) {
				free(block);
				return -ENOMEM;
			}

			range->blocks[CTX_BLOCK_ID(reg[i + j].offset)] = block;
		}

		r600_init_block(ctx, block, reg, i, n, opcode, offset_base);
	}
	return 0;
}

static const struct r600_reg r600_context_reg_list[] = {
	{R_028D24_DB_HTILE_SURFACE, 0, 0},
	{R_028614_SPI_VS_OUT_ID_0, 0, 0},
	{R_028618_SPI_VS_OUT_ID_1, 0, 0},
	{R_02861C_SPI_VS_OUT_ID_2, 0, 0},
	{R_028620_SPI_VS_OUT_ID_3, 0, 0},
	{R_028624_SPI_VS_OUT_ID_4, 0, 0},
	{R_028628_SPI_VS_OUT_ID_5, 0, 0},
	{R_02862C_SPI_VS_OUT_ID_6, 0, 0},
	{R_028630_SPI_VS_OUT_ID_7, 0, 0},
	{R_028634_SPI_VS_OUT_ID_8, 0, 0},
	{R_028638_SPI_VS_OUT_ID_9, 0, 0},
	{R_0286C4_SPI_VS_OUT_CONFIG, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028858_SQ_PGM_START_VS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028868_SQ_PGM_RESOURCES_VS, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_0288A4_SQ_PGM_RESOURCES_FS, 0, 0},
	{R_0288DC_SQ_PGM_CF_OFFSET_FS, 0, 0},
	{R_028644_SPI_PS_INPUT_CNTL_0, 0, 0},
	{R_028648_SPI_PS_INPUT_CNTL_1, 0, 0},
	{R_02864C_SPI_PS_INPUT_CNTL_2, 0, 0},
	{R_028650_SPI_PS_INPUT_CNTL_3, 0, 0},
	{R_028654_SPI_PS_INPUT_CNTL_4, 0, 0},
	{R_028658_SPI_PS_INPUT_CNTL_5, 0, 0},
	{R_02865C_SPI_PS_INPUT_CNTL_6, 0, 0},
	{R_028660_SPI_PS_INPUT_CNTL_7, 0, 0},
	{R_028664_SPI_PS_INPUT_CNTL_8, 0, 0},
	{R_028668_SPI_PS_INPUT_CNTL_9, 0, 0},
	{R_02866C_SPI_PS_INPUT_CNTL_10, 0, 0},
	{R_028670_SPI_PS_INPUT_CNTL_11, 0, 0},
	{R_028674_SPI_PS_INPUT_CNTL_12, 0, 0},
	{R_028678_SPI_PS_INPUT_CNTL_13, 0, 0},
	{R_02867C_SPI_PS_INPUT_CNTL_14, 0, 0},
	{R_028680_SPI_PS_INPUT_CNTL_15, 0, 0},
	{R_028684_SPI_PS_INPUT_CNTL_16, 0, 0},
	{R_028688_SPI_PS_INPUT_CNTL_17, 0, 0},
	{R_02868C_SPI_PS_INPUT_CNTL_18, 0, 0},
	{R_028690_SPI_PS_INPUT_CNTL_19, 0, 0},
	{R_028694_SPI_PS_INPUT_CNTL_20, 0, 0},
	{R_028698_SPI_PS_INPUT_CNTL_21, 0, 0},
	{R_02869C_SPI_PS_INPUT_CNTL_22, 0, 0},
	{R_0286A0_SPI_PS_INPUT_CNTL_23, 0, 0},
	{R_0286A4_SPI_PS_INPUT_CNTL_24, 0, 0},
	{R_0286A8_SPI_PS_INPUT_CNTL_25, 0, 0},
	{R_0286AC_SPI_PS_INPUT_CNTL_26, 0, 0},
	{R_0286B0_SPI_PS_INPUT_CNTL_27, 0, 0},
	{R_0286B4_SPI_PS_INPUT_CNTL_28, 0, 0},
	{R_0286B8_SPI_PS_INPUT_CNTL_29, 0, 0},
	{R_0286BC_SPI_PS_INPUT_CNTL_30, 0, 0},
	{R_0286C0_SPI_PS_INPUT_CNTL_31, 0, 0},
	{R_0286CC_SPI_PS_IN_CONTROL_0, 0, 0},
	{R_0286D0_SPI_PS_IN_CONTROL_1, 0, 0},
	{R_0286D8_SPI_INPUT_Z, 0, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028840_SQ_PGM_START_PS, REG_FLAG_NEED_BO, 0},
	{GROUP_FORCE_NEW_BLOCK, 0, 0},
	{R_028850_SQ_PGM_RESOURCES_PS, 0, 0},
	{R_028854_SQ_PGM_EXPORTS_PS, 0, 0},
};

/* tear down the per-range block tables built by r600_context_add_block() */
void r600_context_fini(struct r600_context *ctx)
{
	struct r600_block *block;
	struct r600_range *range;

	if (ctx->range) {
		for (int i = 0; i < NUM_RANGES; i++) {
			if (!ctx->range[i].blocks)
				continue;
			for (int j = 0; j < (1 << HASH_SHIFT); j++) {
				block = ctx->range[i].blocks[j];
				if (block) {
					for (int k = 0, offset = block->start_offset; k < block->nreg; k++, offset += 4) {
						range = &ctx->range[CTX_RANGE_ID(offset)];
						range->blocks[CTX_BLOCK_ID(offset)] = NULL;
					}
					for (int k = 1; k <= block->nbo; k++) {
						pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
					}
					free(block);
				}
			}
			free(ctx->range[i].blocks);
		}
	}
	free(ctx->blocks);
}

int r600_setup_block_table(struct r600_context *ctx)
{
	/* Flatten the range/hash tables into a unique array of blocks.
	 * A block can occupy several hash slots (one per register it
	 * covers), so skip blocks we have already added.
	 */
	int c = 0;
	ctx->blocks = calloc(ctx->nblocks, sizeof(void*));
	if (!ctx->blocks)
		return -ENOMEM;
	for (int i = 0; i < NUM_RANGES; i++) {
		if (!ctx->range[i].blocks)
			continue;
		for (int j = 0, add; j < (1 << HASH_SHIFT); j++) {
			if (!ctx->range[i].blocks[j])
				continue;

			add = 1;
			for (int k = 0; k < c; k++) {
				if (ctx->blocks[k] == ctx->range[i].blocks[j]) {
					add = 0;
					break;
				}
			}
			if (add) {
				assert(c < ctx->nblocks);
				ctx->blocks[c++] = ctx->range[i].blocks[j];
				j += (ctx->range[i].blocks[j]->nreg) - 1;
			}
		}
	}
	return 0;
}

int r600_context_init(struct r600_context *ctx)
{
	int r;

	/* add blocks */
	r = r600_context_add_block(ctx, r600_context_reg_list,
				   Elements(r600_context_reg_list), PKT3_SET_CONTEXT_REG, R600_CONTEXT_REG_OFFSET);
	if (r)
		goto out_err;

	r = r600_setup_block_table(ctx);
	if (r)
		goto out_err;

	ctx->max_db = 4;
	return 0;
out_err:
	r600_context_fini(ctx);
	return r;
}

void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
			boolean count_draw_in)
{
	/* The number of dwords we already used in the CS so far. */
	num_dw += ctx->rings.gfx.cs->cdw;

	if (count_draw_in) {
		unsigned i;

		/* The number of dwords all the dirty states would take. */
		for (i = 0; i < R600_NUM_ATOMS; i++) {
			if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
				num_dw += ctx->atoms[i]->num_dw;
#if R600_TRACE_CS
				if (ctx->screen->trace_bo) {
					num_dw += R600_TRACE_CS_DWORDS;
				}
#endif
			}
		}

		num_dw += ctx->pm4_dirty_cdwords;

		/* The upper bound of how much space a draw command would take. */
		num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
#if R600_TRACE_CS
		if (ctx->screen->trace_bo) {
			num_dw += R600_TRACE_CS_DWORDS;
		}
#endif
	}

	/* Count in queries_suspend. */
	num_dw += ctx->num_cs_dw_nontimer_queries_suspend;

	/* Count in streamout_end at the end of CS. */
	num_dw += ctx->num_cs_dw_streamout_end;

	/* Count in render_condition(NULL) at the end of CS. */
	if (ctx->predicate_drawing) {
		num_dw += 3;
	}

	/* SX_MISC */
	if (ctx->chip_class <= R700) {
		num_dw += 3;
	}

	/* Count in framebuffer cache flushes at the end of CS. */
	num_dw += R600_MAX_FLUSH_CS_DWORDS;

	/* The fence at the end of CS. */
	num_dw += 10;

	/* Flush if there's not enough space. */
	if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
		ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
	}
}

void r600_context_dirty_block(struct r600_context *ctx,
			      struct r600_block *block,
			      int dirty, int index)
{
	if ((index + 1) > block->nreg_dirty)
		block->nreg_dirty = index + 1;

	if ((dirty != (block->status & R600_BLOCK_STATUS_DIRTY)) || !(block->status & R600_BLOCK_STATUS_ENABLED)) {
		block->status |= R600_BLOCK_STATUS_DIRTY;
		ctx->pm4_dirty_cdwords += block->pm4_ndwords;
		if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
			block->status |= R600_BLOCK_STATUS_ENABLED;
			LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
		}
		LIST_ADDTAIL(&block->list, &ctx->dirty);

		if (block->flags & REG_FLAG_FLUSH_CHANGE) {
			ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE;
		}
	}
}

/**
 * If reg needs a reloc, this function will add it to its block's reloc list.
 * @return true if reg needs a reloc, false otherwise
 */
static bool r600_reg_set_block_reloc(struct r600_pipe_reg *reg)
{
	unsigned reloc_id;

	if (!reg->block->pm4_bo_index[reg->id]) {
		return false;
	}
	/* find relocation */
	reloc_id = reg->block->pm4_bo_index[reg->id];
	pipe_resource_reference(
		(struct pipe_resource**)&reg->block->reloc[reloc_id].bo,
		&reg->bo->b.b);
	reg->block->reloc[reloc_id].bo_usage = reg->bo_usage;
	return true;
}

/**
 * This function will emit all the registers in state directly to the command
 * stream, allowing you to bypass the r600_context dirty list.
 *
 * This is used for dispatching compute shaders to avoid mixing compute and
 * 3D states in the context's dirty list.
 *
 * @param pkt_flags Should be either 0 or RADEON_CP_PACKET3_COMPUTE_MODE. This
 * value will be passed on to r600_context_block_emit_dirty and OR'd against
 * the PKT3 headers.
 */
void r600_context_pipe_state_emit(struct r600_context *ctx,
				  struct r600_pipe_state *state,
				  unsigned pkt_flags)
{
	unsigned i;

	/* Mark all blocks as dirty:
	 * Since two registers can be in the same block, we need to make sure
	 * we mark all the blocks dirty before we emit any of them.  If we were
	 * to mark blocks dirty and emit them in the same loop, like this:
	 *
	 * foreach (reg in state->regs) {
	 *     mark_dirty(reg->block)
	 *     emit_block(reg->block)
	 * }
	 *
	 * then if we had two registers in this state that were in the same
	 * block, we would end up emitting that block twice.
	 */
	for (i = 0; i < state->nregs; i++) {
		struct r600_pipe_reg *reg = &state->regs[i];
		/* Mark all the registers in the block as dirty */
		reg->block->nreg_dirty = reg->block->nreg;
		reg->block->status |= R600_BLOCK_STATUS_DIRTY;
		/* Update the reloc for this register if necessary. */
		r600_reg_set_block_reloc(reg);
	}

	/* Emit the register writes */
	for (i = 0; i < state->nregs; i++) {
		struct r600_pipe_reg *reg = &state->regs[i];
		if (reg->block->status & R600_BLOCK_STATUS_DIRTY) {
			r600_context_block_emit_dirty(ctx, reg->block, pkt_flags);
		}
	}
}

void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state)
{
	struct r600_block *block;
	int dirty;
	for (int i = 0; i < state->nregs; i++) {
		unsigned id;
		struct r600_pipe_reg *reg = &state->regs[i];

		block = reg->block;
		id = reg->id;

		dirty = block->status & R600_BLOCK_STATUS_DIRTY;

		if (reg->value != block->reg[id]) {
			block->reg[id] = reg->value;
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}
		if (block->flags & REG_FLAG_DIRTY_ALWAYS)
			dirty |= R600_BLOCK_STATUS_DIRTY;
		if (r600_reg_set_block_reloc(reg)) {
			/* always force dirty for relocs for now */
			dirty |= R600_BLOCK_STATUS_DIRTY;
		}

		if (dirty)
			r600_context_dirty_block(ctx, block, dirty, id);
	}
}

/**
 * @param pkt_flags should be set to RADEON_CP_PACKET3_COMPUTE_MODE if this
 * block will be used for compute shaders.
 */
void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block,
				   unsigned pkt_flags)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	int optional = block->nbo == 0 && !(block->flags & REG_FLAG_DIRTY_ALWAYS);
	int cp_dwords = block->pm4_ndwords, start_dword = 0;
	int new_dwords = 0;
	int nbo = block->nbo;

	if (block->nreg_dirty == 0 && optional) {
		goto out;
	}

	if (nbo) {
		for (int j = 0; j < block->nreg; j++) {
			if (block->pm4_bo_index[j]) {
				/* find relocation */
				struct r600_block_reloc *reloc = &block->reloc[block->pm4_bo_index[j]];
				if (reloc->bo) {
					block->pm4[reloc->bo_pm4_index] =
						r600_context_bo_reloc(ctx, &ctx->rings.gfx, reloc->bo, reloc->bo_usage);
				} else {
					block->pm4[reloc->bo_pm4_index] = 0;
				}
				nbo--;
				if (nbo == 0)
					break;
			}
		}
	}

	optional &= (block->nreg_dirty != block->nreg);
	if (optional) {
		new_dwords = block->nreg_dirty;
		start_dword = cs->cdw;
		cp_dwords = new_dwords + 2;
	}
	memcpy(&cs->buf[cs->cdw], block->pm4, cp_dwords * 4);

	/* We are applying the pkt_flags after copying the register block to
	 * the command stream, because it is possible this block will be
	 * emitted with a different pkt_flags, and we don't want to store the
	 * pkt_flags in the block.
	 */
	cs->buf[cs->cdw] |= pkt_flags;
	cs->cdw += cp_dwords;

	if (optional) {
		uint32_t newword;

		newword = cs->buf[start_dword];
		newword &= PKT_COUNT_C;
		newword |= PKT_COUNT_S(new_dwords);
		cs->buf[start_dword] = newword;
	}
out:
	block->status ^= R600_BLOCK_STATUS_DIRTY;
	block->nreg_dirty = 0;
	LIST_DELINIT(&block->list);
}

void r600_flush_emit(struct r600_context *rctx)
{
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
	unsigned cp_coher_cntl = 0;
	unsigned wait_until = 0;
	unsigned emit_flush = 0;

	if (!rctx->flags) {
		return;
	}

	if (rctx->chip_class >= R700 &&
	    (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0);
	}

	if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV) {
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
		if (rctx->chip_class >= EVERGREEN) {
			cp_coher_cntl = S_0085F0_CB0_DEST_BASE_ENA(1) |
					S_0085F0_CB1_DEST_BASE_ENA(1) |
					S_0085F0_CB2_DEST_BASE_ENA(1) |
					S_0085F0_CB3_DEST_BASE_ENA(1) |
					S_0085F0_CB4_DEST_BASE_ENA(1) |
					S_0085F0_CB5_DEST_BASE_ENA(1) |
					S_0085F0_CB6_DEST_BASE_ENA(1) |
					S_0085F0_CB7_DEST_BASE_ENA(1) |
					S_0085F0_CB8_DEST_BASE_ENA(1) |
					S_0085F0_CB9_DEST_BASE_ENA(1) |
					S_0085F0_CB10_DEST_BASE_ENA(1) |
					S_0085F0_CB11_DEST_BASE_ENA(1) |
					S_0085F0_DB_DEST_BASE_ENA(1) |
					S_0085F0_TC_ACTION_ENA(1) |
					S_0085F0_CB_ACTION_ENA(1) |
					S_0085F0_DB_ACTION_ENA(1) |
					S_0085F0_SH_ACTION_ENA(1) |
					S_0085F0_SMX_ACTION_ENA(1) |
					S_0085F0_FULL_CACHE_ENA(1);
		} else {
			cp_coher_cntl = S_0085F0_SMX_ACTION_ENA(1) |
					S_0085F0_SH_ACTION_ENA(1) |
					S_0085F0_VC_ACTION_ENA(1) |
					S_0085F0_TC_ACTION_ENA(1) |
					S_0085F0_FULL_CACHE_ENA(1);
		}
	}

	if (rctx->flags & R600_CONTEXT_INVAL_READ_CACHES) {
		cp_coher_cntl |= S_0085F0_VC_ACTION_ENA(1) |
				 S_0085F0_TC_ACTION_ENA(1) |
				 S_0085F0_FULL_CACHE_ENA(1);
		emit_flush = 1;
	}

	if (rctx->family >= CHIP_RV770 && (rctx->flags & R600_CONTEXT_STREAMOUT_FLUSH)) {
		cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) |
				 S_0085F0_SO1_DEST_BASE_ENA(1) |
				 S_0085F0_SO2_DEST_BASE_ENA(1) |
				 S_0085F0_SO3_DEST_BASE_ENA(1) |
				 S_0085F0_SMX_ACTION_ENA(1);
		emit_flush = 1;
	}

	if (emit_flush) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
		cs->buf[cs->cdw++] = cp_coher_cntl; /* CP_COHER_CNTL */
		cs->buf[cs->cdw++] = 0xffffffff;    /* CP_COHER_SIZE */
		cs->buf[cs->cdw++] = 0;             /* CP_COHER_BASE */
		cs->buf[cs->cdw++] = 0x0000000A;    /* POLL_INTERVAL */
	}

	if (rctx->flags & R600_CONTEXT_WAIT_3D_IDLE) {
		wait_until |= S_008040_WAIT_3D_IDLE(1);
	}
	if (rctx->flags & R600_CONTEXT_WAIT_CP_DMA_IDLE) {
		wait_until |= S_008040_WAIT_CP_DMA_IDLE(1);
	}
	if (wait_until) {
		/* wait for things to settle */
		r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
	}

	/* everything is properly flushed */
	rctx->flags = 0;
}

void r600_context_flush(struct r600_context *ctx, unsigned flags)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	if (cs->cdw == ctx->start_cs_cmd.num_dw)
		return;

	ctx->nontimer_queries_suspended = false;
	ctx->streamout_suspended = false;

	/* suspend queries */
	if (ctx->num_cs_dw_nontimer_queries_suspend) {
		r600_suspend_nontimer_queries(ctx);
		ctx->nontimer_queries_suspended = true;
	}

	if (ctx->num_cs_dw_streamout_end) {
		r600_context_streamout_end(ctx);
		ctx->streamout_suspended = true;
	}

	/* A flush is needed to avoid lockups on some chips with user fences;
	 * this also flushes the framebuffer cache.
	 */
	ctx->flags |= R600_CONTEXT_FLUSH_AND_INV |
		      R600_CONTEXT_FLUSH_AND_INV_CB_META |
		      R600_CONTEXT_WAIT_3D_IDLE |
		      R600_CONTEXT_WAIT_CP_DMA_IDLE;

	r600_flush_emit(ctx);

	/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
	if (ctx->chip_class <= R700) {
		r600_write_context_reg(cs, R_028350_SX_MISC, 0);
	}

	/* force to keep tiling flags */
	if (ctx->keep_tiling_flags) {
		flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
	}

	/* Flush the CS. */
#if R600_TRACE_CS
	if (ctx->screen->trace_bo) {
		struct r600_screen *rscreen = ctx->screen;
		unsigned i;

		for (i = 0; i < cs->cdw; i++) {
			fprintf(stderr, "[%4d] [%5d] 0x%08x\n", rscreen->cs_count, i, cs->buf[i]);
		}
		rscreen->cs_count++;
	}
#endif
	ctx->ws->cs_flush(ctx->rings.gfx.cs, flags);
#if R600_TRACE_CS
	if (ctx->screen->trace_bo) {
		struct r600_screen *rscreen = ctx->screen;
		unsigned i;

		for (i = 0; i < 10; i++) {
			usleep(5);
			if (!ctx->ws->buffer_is_busy(rscreen->trace_bo->buf, RADEON_USAGE_READWRITE)) {
				break;
			}
		}
		if (i == 10) {
			fprintf(stderr, "timeout, cs lockup likely happened at cs %d dw %d\n",
				rscreen->trace_ptr[1], rscreen->trace_ptr[0]);
		} else {
			fprintf(stderr, "cs %d executed in %dms\n", rscreen->trace_ptr[1], i * 5);
		}
	}
#endif

	r600_begin_new_cs(ctx);
}

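/* Start a fresh CS: emit the start-of-CS command buffer and mark every atom,
 * state and register block dirty so that the new CS is self-contained.
 */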
void r600_begin_new_cs(struct r600_context *ctx)
{
	struct r600_block *enable_block = NULL;
	unsigned shader;

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	/* Begin a new CS. */
	r600_emit_command_buffer(ctx->rings.gfx.cs, &ctx->start_cs_cmd);

	/* Re-emit states. */
	ctx->alphatest_state.atom.dirty = true;
	ctx->blend_color.atom.dirty = true;
	ctx->cb_misc_state.atom.dirty = true;
	ctx->clip_misc_state.atom.dirty = true;
	ctx->clip_state.atom.dirty = true;
	ctx->db_misc_state.atom.dirty = true;
	ctx->db_state.atom.dirty = true;
	ctx->framebuffer.atom.dirty = true;
	ctx->poly_offset_state.atom.dirty = true;
	ctx->vgt_state.atom.dirty = true;
	ctx->vgt2_state.atom.dirty = true;
	ctx->sample_mask.atom.dirty = true;
	ctx->scissor.atom.dirty = true;
	ctx->config_state.atom.dirty = true;
	ctx->stencil_ref.atom.dirty = true;
	ctx->vertex_fetch_shader.atom.dirty = true;
	ctx->viewport.atom.dirty = true;

	if (ctx->blend_state.cso)
		ctx->blend_state.atom.dirty = true;
	if (ctx->dsa_state.cso)
		ctx->dsa_state.atom.dirty = true;
	if (ctx->rasterizer_state.cso)
		ctx->rasterizer_state.atom.dirty = true;

	if (ctx->chip_class <= R700) {
		ctx->seamless_cube_map.atom.dirty = true;
	}

	ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
	r600_vertex_buffers_dirty(ctx);

	/* Re-emit shader resources. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *constbuf = &ctx->constbuf_state[shader];
		struct r600_textures_info *samplers = &ctx->samplers[shader];

		constbuf->dirty_mask = constbuf->enabled_mask;
		samplers->views.dirty_mask = samplers->views.enabled_mask;
		samplers->states.dirty_mask = samplers->states.enabled_mask;

		r600_constant_buffers_dirty(ctx, constbuf);
		r600_sampler_views_dirty(ctx, &samplers->views);
		r600_sampler_states_dirty(ctx, &samplers->states);
	}

	if (ctx->streamout_suspended) {
		ctx->streamout_start = TRUE;
		ctx->streamout_append_bitmask = ~0;
	}

	/* resume queries */
	if (ctx->nontimer_queries_suspended) {
		r600_resume_nontimer_queries(ctx);
	}

	/* mark all valid register blocks as dirty so they get re-emitted on
	 * the next draw command
	 */
	LIST_FOR_EACH_ENTRY(enable_block, &ctx->enable_list, enable_list) {
		if (!(enable_block->status & R600_BLOCK_STATUS_DIRTY)) {
			LIST_ADDTAIL(&enable_block->list, &ctx->dirty);
			enable_block->status |= R600_BLOCK_STATUS_DIRTY;
		}
		ctx->pm4_dirty_cdwords += enable_block->pm4_ndwords;
		enable_block->nreg_dirty = enable_block->nreg;
	}

	/* Re-emit the draw state. */
	ctx->last_primitive_type = -1;
	ctx->last_start_instance = -1;
}

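/* Emit an EVENT_WRITE_EOP that writes 'value' to 'fence_bo' at dword
 * 'offset' once the preceding work has finished; the CPU can then poll that
 * dword to wait for the GPU.
 */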
void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	uint64_t va;

	r600_need_cs_space(ctx, 10, FALSE);

	va = r600_resource_va(&ctx->screen->screen, (void*)fence_bo);
	va = va + (offset << 2);

	r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
	cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* ADDRESS_LO */
	/* DATA_SEL | INT_EN | ADDRESS_HI */
	cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
	cs->buf[cs->cdw++] = value;             /* DATA_LO */
	cs->buf[cs->cdw++] = 0;                 /* DATA_HI */
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, fence_bo, RADEON_USAGE_WRITE);
}

static void r600_flush_vgt_streamout(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	r600_write_config_reg(cs, R_008490_CP_STRMOUT_CNTL, 0);

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0);

	cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0);
	cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */
	cs->buf[cs->cdw++] = R_008490_CP_STRMOUT_CNTL >> 2;  /* register */
	cs->buf[cs->cdw++] = 0;
	cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* reference value */
	cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* mask */
	cs->buf[cs->cdw++] = 4; /* poll interval */
}

static void r600_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;

	if (buffer_enable_bit) {
		r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(1));
		r600_write_context_reg(cs, R_028B20_VGT_STRMOUT_BUFFER_EN, buffer_enable_bit);
	} else {
		r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(0));
	}
}

void r600_context_streamout_begin(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_so_target **t = ctx->so_targets;
	unsigned *stride_in_dw = ctx->vs_shader->so.stride;
	unsigned buffer_en, i, update_flags = 0;
	uint64_t va;
	unsigned num_cs_dw_streamout_end;

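	/* One bit per bound streamout target: bit i is set iff target i is
	 * non-NULL.
	 */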
	buffer_en = (ctx->num_so_targets >= 1 && t[0] ? 1 : 0) |
		    (ctx->num_so_targets >= 2 && t[1] ? 2 : 0) |
		    (ctx->num_so_targets >= 3 && t[2] ? 4 : 0) |
		    (ctx->num_so_targets >= 4 && t[3] ? 8 : 0);

	num_cs_dw_streamout_end =
		12 + /* flush_vgt_streamout */
		util_bitcount(buffer_en) * 8 + /* STRMOUT_BUFFER_UPDATE */
		3 /* set_streamout_enable(0) */;

	r600_need_cs_space(ctx,
			   12 + /* flush_vgt_streamout */
			   6 + /* set_streamout_enable */
			   util_bitcount(buffer_en) * 7 + /* SET_CONTEXT_REG */
			   (ctx->family >= CHIP_RS780 &&
			    ctx->family <= CHIP_RV740 ? util_bitcount(buffer_en) * 5 : 0) + /* STRMOUT_BASE_UPDATE */
			   util_bitcount(buffer_en & ctx->streamout_append_bitmask) * 8 + /* STRMOUT_BUFFER_UPDATE */
			   util_bitcount(buffer_en & ~ctx->streamout_append_bitmask) * 6 + /* STRMOUT_BUFFER_UPDATE */
			   (ctx->family > CHIP_R600 && ctx->family < CHIP_RS780 ? 2 : 0) + /* SURFACE_BASE_UPDATE */
			   num_cs_dw_streamout_end, TRUE);

	/* This must be set after r600_need_cs_space. */
	ctx->num_cs_dw_streamout_end = num_cs_dw_streamout_end;

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_flush_vgt_streamout(ctx);
		evergreen_set_streamout_enable(ctx, buffer_en);
	} else {
		r600_flush_vgt_streamout(ctx);
		r600_set_streamout_enable(ctx, buffer_en);
	}

	for (i = 0; i < ctx->num_so_targets; i++) {
		if (t[i]) {
			t[i]->stride_in_dw = stride_in_dw[i];
			t[i]->so_index = i;
			va = r600_resource_va(&ctx->screen->screen,
					      (void*)t[i]->b.buffer);

			update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);

			r600_write_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 3);
			r600_write_value(cs, (t[i]->b.buffer_offset +
					      t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
			r600_write_value(cs, stride_in_dw[i]);              /* VTX_STRIDE (in DW) */
			r600_write_value(cs, va >> 8);                      /* BUFFER_BASE */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] =
				r600_context_bo_reloc(ctx, &ctx->rings.gfx, r600_resource(t[i]->b.buffer),
						      RADEON_USAGE_WRITE);

			/* R7xx requires this packet after updating BUFFER_BASE.
			 * Without this, R7xx locks up. */
			if (ctx->family >= CHIP_RS780 && ctx->family <= CHIP_RV740) {
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BASE_UPDATE, 1, 0);
				cs->buf[cs->cdw++] = i;
				cs->buf[cs->cdw++] = va >> 8;

				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] =
					r600_context_bo_reloc(ctx, &ctx->rings.gfx, r600_resource(t[i]->b.buffer),
							      RADEON_USAGE_WRITE);
			}

			if (ctx->streamout_append_bitmask & (1 << i)) {
				va = r600_resource_va(&ctx->screen->screen,
						      (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset;
				/* Append. */
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
				cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
						     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM); /* control */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* src address lo */
				cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */

				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] =
					r600_context_bo_reloc(ctx, &ctx->rings.gfx, t[i]->buf_filled_size,
							      RADEON_USAGE_READ);
			} else {
				/* Start from the beginning. */
				cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
				cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
						     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET); /* control */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = 0; /* unused */
				cs->buf[cs->cdw++] = t[i]->b.buffer_offset >> 2; /* buffer offset in DW */
				cs->buf[cs->cdw++] = 0; /* unused */
			}
		}
	}

	if (ctx->family > CHIP_R600 && ctx->family < CHIP_RS780) {
		cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
		cs->buf[cs->cdw++] = update_flags;
	}
}

void r600_context_streamout_end(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
	struct r600_so_target **t = ctx->so_targets;
	unsigned i;
	uint64_t va;

	if (ctx->chip_class >= EVERGREEN) {
		evergreen_flush_vgt_streamout(ctx);
	} else {
		r600_flush_vgt_streamout(ctx);
	}

	for (i = 0; i < ctx->num_so_targets; i++) {
		if (t[i]) {
			va = r600_resource_va(&ctx->screen->screen,
					      (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset;
			cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
			cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
					     STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
					     STRMOUT_STORE_BUFFER_FILLED_SIZE; /* control */
			cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* dst address lo */
			cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* dst address hi */
			cs->buf[cs->cdw++] = 0; /* unused */
			cs->buf[cs->cdw++] = 0; /* unused */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] =
				r600_context_bo_reloc(ctx, &ctx->rings.gfx, t[i]->buf_filled_size,
						      RADEON_USAGE_WRITE);
		}
	}

	if (ctx->chip_class >= EVERGREEN) {
		ctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
		evergreen_set_streamout_enable(ctx, 0);
	} else {
		if (ctx->chip_class >= R700) {
			ctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
		}
		r600_set_streamout_enable(ctx, 0);
	}
	ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
	ctx->num_cs_dw_streamout_end = 0;
}

/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
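/* Larger copies are split into multiple packets by the loop in
 * r600_cp_dma_copy_buffer below; e.g. a 4 MiB copy (illustrative) takes two
 * full-size packets plus a 16-byte remainder, and only the last packet sets
 * PKT3_CP_DMA_CP_SYNC.
 */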

void r600_cp_dma_copy_buffer(struct r600_context *rctx,
			     struct pipe_resource *dst, uint64_t dst_offset,
			     struct pipe_resource *src, uint64_t src_offset,
			     unsigned size)
{
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;

	assert(size);

	/* CP DMA doesn't work on R600 (flushing seems to be unreliable). */
	assert(rctx->chip_class != R600);
	if (rctx->chip_class == R600) {
		return;
	}

	dst_offset += r600_resource_va(&rctx->screen->screen, dst);
	src_offset += r600_resource_va(&rctx->screen->screen, src);

	/* We flush the caches, because we might read from or write
	 * to resources which are bound right now. */
	rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES |
		       R600_CONTEXT_FLUSH_AND_INV |
		       R600_CONTEXT_FLUSH_AND_INV_CB_META |
		       R600_CONTEXT_STREAMOUT_FLUSH |
		       R600_CONTEXT_WAIT_3D_IDLE;

	/* There are differences between R700 and EG in CP DMA,
	 * but we only use the common bits here. */
	while (size) {
		unsigned sync = 0;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned src_reloc, dst_reloc;

		r600_need_cs_space(rctx, 10 + (rctx->flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);

		/* Flush the caches for the first copy only. */
		if (rctx->flags) {
			r600_flush_emit(rctx);
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count) {
			sync = PKT3_CP_DMA_CP_SYNC;
		}

		/* This must be done after r600_need_cs_space. */
		src_reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
		dst_reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);

		r600_write_value(cs, PKT3(PKT3_CP_DMA, 4, 0));
		r600_write_value(cs, src_offset);                         /* SRC_ADDR_LO [31:0] */
		r600_write_value(cs, sync | ((src_offset >> 32) & 0xff)); /* CP_SYNC [31] | SRC_ADDR_HI [7:0] */
		r600_write_value(cs, dst_offset);                         /* DST_ADDR_LO [31:0] */
		r600_write_value(cs, (dst_offset >> 32) & 0xff);          /* DST_ADDR_HI [7:0] */
		r600_write_value(cs, byte_count);                         /* COMMAND [29:22] | BYTE_COUNT [20:0] */

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
		r600_write_value(cs, src_reloc);
		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
		r600_write_value(cs, dst_reloc);

		size -= byte_count;
		src_offset += byte_count;
		dst_offset += byte_count;
	}
}