radeonsi: remove open-coded PS_PARTIAL_FLUSH event
[mesa.git] / src / gallium / drivers / radeonsi / si_descriptors.c
1 /*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Marek Olšák <marek.olsak@amd.com>
25 */
26 #include "../radeon/r600_cs.h"
27 #include "si_pipe.h"
28 #include "si_shader.h"
29
30 #include "util/u_memory.h"
31 #include "util/u_upload_mgr.h"
32
33 #define SI_NUM_CONTEXTS 16
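/* Each descriptor buffer holds SI_NUM_CONTEXTS copies of its descriptor
 * array. si_emit_descriptors copies the current slot into the next one
 * before applying updates, presumably so that draws still reading the
 * previous slot are not disturbed and no pipeline stall is needed. */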
34
35 static uint32_t null_desc[8]; /* zeros */
36
37 /* Set this if you want the 3D engine to wait until CP DMA is done.
38 * It should be set on the last CP DMA packet. */
39 #define R600_CP_DMA_SYNC (1 << 0) /* R600+ */
40
41 /* Set this if the source data was used as a destination in a previous CP DMA
42 * packet. It's for preventing a read-after-write (RAW) hazard between two
43 * CP DMA packets. */
44 #define SI_CP_DMA_RAW_WAIT (1 << 1) /* SI+ */
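/* These flags are internal to this file; si_emit_cp_dma_copy_buffer and
 * si_emit_cp_dma_clear_buffer translate them into the CP_SYNC and RAW_WAIT
 * bits of the CP_DMA / DMA_DATA packets below. */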
45
46 /* Emit a CP DMA packet to do a copy from one buffer to another.
47 * The size must fit in bits [20:0].
48 */
49 static void si_emit_cp_dma_copy_buffer(struct si_context *sctx,
50 uint64_t dst_va, uint64_t src_va,
51 unsigned size, unsigned flags)
52 {
53 struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
54 uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
55 uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;
56
57 assert(size);
58 assert((size & ((1<<21)-1)) == size);
59
60 if (sctx->b.chip_class >= CIK) {
61 radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
62 radeon_emit(cs, sync_flag); /* CP_SYNC [31] */
63 radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
64 radeon_emit(cs, src_va >> 32); /* SRC_ADDR_HI [31:0] */
65 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
66 radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
67 radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
68 } else {
69 radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
70 radeon_emit(cs, src_va); /* SRC_ADDR_LO [31:0] */
71 radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
72 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
73 radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
74 radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
75 }
76 }
77
78 /* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
79 static void si_emit_cp_dma_clear_buffer(struct si_context *sctx,
80 uint64_t dst_va, unsigned size,
81 uint32_t clear_value, unsigned flags)
82 {
83 struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
84 uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
85 uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;
86
87 assert(size);
88 assert((size & ((1<<21)-1)) == size);
89
90 if (sctx->b.chip_class >= CIK) {
91 radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
92 radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
93 radeon_emit(cs, clear_value); /* DATA [31:0] */
 94 radeon_emit(cs, 0); /* unused */
95 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
 96 radeon_emit(cs, dst_va >> 32); /* DST_ADDR_HI [31:0] */
97 radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
98 } else {
99 radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
100 radeon_emit(cs, clear_value); /* DATA [31:0] */
101 radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
102 radeon_emit(cs, dst_va); /* DST_ADDR_LO [31:0] */
103 radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
104 radeon_emit(cs, size | raw_wait); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
105 }
106 }
107
108 static void si_init_descriptors(struct si_context *sctx,
109 struct si_descriptors *desc,
110 unsigned shader_userdata_reg,
111 unsigned element_dw_size,
112 unsigned num_elements,
113 void (*emit_func)(struct si_context *ctx, struct r600_atom *state))
114 {
115 uint64_t va;
116
117 assert(num_elements <= sizeof(desc->enabled_mask)*8);
118 assert(num_elements <= sizeof(desc->dirty_mask)*8);
119
120 desc->atom.emit = (void*)emit_func;
121 desc->shader_userdata_reg = shader_userdata_reg;
122 desc->element_dw_size = element_dw_size;
123 desc->num_elements = num_elements;
124 desc->context_size = num_elements * element_dw_size * 4;
125
126 desc->buffer = (struct r600_resource*)
127 pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
128 PIPE_USAGE_STATIC,
129 SI_NUM_CONTEXTS * desc->context_size);
130
131 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, desc->buffer, RADEON_USAGE_READWRITE);
132 va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);
133
134 /* We don't check for CS space here, because this should be called
135 * only once at context initialization. */
136 si_emit_cp_dma_clear_buffer(sctx, va, desc->buffer->b.b.width0, 0,
137 R600_CP_DMA_SYNC);
138 }
139
140 static void si_release_descriptors(struct si_descriptors *desc)
141 {
142 pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
143 }
144
145 static void si_update_descriptors(struct si_context *sctx,
146 struct si_descriptors *desc)
147 {
148 if (desc->dirty_mask) {
149 desc->atom.num_dw =
 150 7 + /* CP DMA copy of the previous context slot */
 151 (4 + desc->element_dw_size) * util_bitcount(desc->dirty_mask) + /* WRITE_DATA header + data per dirty element (worst case, no merging) */
 152 4; /* SET_SH_REG pointer update */
153 desc->atom.dirty = true;
154 /* The descriptors are read with the K cache. */
155 sctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
156 } else {
157 desc->atom.dirty = false;
158 }
159 }
160
161 static void si_emit_shader_pointer(struct si_context *sctx,
162 struct si_descriptors *desc)
163 {
164 struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
165 uint64_t va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b) +
166 desc->current_context_id * desc->context_size;
167
168 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
169 radeon_emit(cs, (desc->shader_userdata_reg - SI_SH_REG_OFFSET) >> 2);
170 radeon_emit(cs, va);
171 radeon_emit(cs, va >> 32);
172 }
173
174 static void si_emit_descriptors(struct si_context *sctx,
175 struct si_descriptors *desc,
176 uint32_t **descriptors)
177 {
178 struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
179 uint64_t va_base;
180 int packet_start;
181 int packet_size = 0;
182 int last_index = desc->num_elements; /* point to a non-existing element */
183 unsigned dirty_mask = desc->dirty_mask;
184 unsigned new_context_id = (desc->current_context_id + 1) % SI_NUM_CONTEXTS;
185
186 assert(dirty_mask);
187
188 va_base = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);
189
190 /* Copy the descriptors to a new context slot. */
191 /* XXX Consider using TC or L2 for this copy on CIK. */
192 si_emit_cp_dma_copy_buffer(sctx,
193 va_base + new_context_id * desc->context_size,
194 va_base + desc->current_context_id * desc->context_size,
195 desc->context_size, R600_CP_DMA_SYNC);
196
197 va_base += new_context_id * desc->context_size;
198
199 /* Update the descriptors.
200 * Updates of consecutive descriptors are merged to one WRITE_DATA packet.
201 *
202 * XXX When unbinding lots of resources, consider clearing the memory
203 * with CP DMA instead of emitting zeros.
204 */
205 while (dirty_mask) {
206 int i = u_bit_scan(&dirty_mask);
207
208 assert(i < desc->num_elements);
209
210 if (last_index+1 == i && packet_size) {
211 /* Append new data at the end of the last packet. */
212 packet_size += desc->element_dw_size;
213 cs->buf[packet_start] = PKT3(PKT3_WRITE_DATA, packet_size, 0);
214 } else {
215 /* Start a new packet. */
216 uint64_t va = va_base + i * desc->element_dw_size * 4;
217
218 packet_start = cs->cdw;
219 packet_size = 2 + desc->element_dw_size;
220
221 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, packet_size, 0));
222 radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_TC_OR_L2) |
223 PKT3_WRITE_DATA_WR_CONFIRM |
224 PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
225 radeon_emit(cs, va & 0xFFFFFFFFUL);
226 radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
227 }
228
229 radeon_emit_array(cs, descriptors[i], desc->element_dw_size);
230
231 last_index = i;
232 }
233
234 desc->dirty_mask = 0;
235 desc->current_context_id = new_context_id;
236
237 /* Now update the shader userdata pointer. */
238 si_emit_shader_pointer(sctx, desc);
239 }
240
241 static unsigned si_get_shader_user_data_base(unsigned shader)
242 {
243 switch (shader) {
244 case PIPE_SHADER_VERTEX:
245 return R_00B130_SPI_SHADER_USER_DATA_VS_0;
246 case PIPE_SHADER_GEOMETRY:
247 return R_00B230_SPI_SHADER_USER_DATA_GS_0;
248 case PIPE_SHADER_FRAGMENT:
249 return R_00B030_SPI_SHADER_USER_DATA_PS_0;
250 default:
251 assert(0);
252 return 0;
253 }
254 }
255
256 /* SAMPLER VIEWS */
257
258 static void si_emit_sampler_views(struct si_context *sctx, struct r600_atom *atom)
259 {
260 struct si_sampler_views *views = (struct si_sampler_views*)atom;
261
262 si_emit_descriptors(sctx, &views->desc, views->desc_data);
263 }
264
265 static void si_init_sampler_views(struct si_context *sctx,
266 struct si_sampler_views *views,
267 unsigned shader)
268 {
269 si_init_descriptors(sctx, &views->desc,
270 si_get_shader_user_data_base(shader) +
271 SI_SGPR_RESOURCE * 4,
272 8, NUM_SAMPLER_VIEWS, si_emit_sampler_views);
273 }
274
275 static void si_release_sampler_views(struct si_sampler_views *views)
276 {
277 int i;
278
279 for (i = 0; i < Elements(views->views); i++) {
280 pipe_sampler_view_reference(&views->views[i], NULL);
281 }
282 si_release_descriptors(&views->desc);
283 }
284
285 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
286 struct si_sampler_views *views)
287 {
288 unsigned mask = views->desc.enabled_mask;
289
290 /* Add relocations to the CS. */
291 while (mask) {
292 int i = u_bit_scan(&mask);
293 struct si_pipe_sampler_view *rview =
294 (struct si_pipe_sampler_view*)views->views[i];
295
296 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);
297 }
298
299 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer, RADEON_USAGE_READWRITE);
300
301 si_emit_shader_pointer(sctx, &views->desc);
302 }
303
304 void si_set_sampler_view(struct si_context *sctx, unsigned shader,
305 unsigned slot, struct pipe_sampler_view *view,
306 unsigned *view_desc)
307 {
308 struct si_sampler_views *views = &sctx->samplers[shader].views;
309
310 if (views->views[slot] == view)
311 return;
312
313 if (view) {
314 struct si_pipe_sampler_view *rview =
315 (struct si_pipe_sampler_view*)view;
316
317 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, rview->resource, RADEON_USAGE_READ);
318
319 pipe_sampler_view_reference(&views->views[slot], view);
320 views->desc_data[slot] = view_desc;
321 views->desc.enabled_mask |= 1 << slot;
322 } else {
323 pipe_sampler_view_reference(&views->views[slot], NULL);
324 views->desc_data[slot] = null_desc;
325 views->desc.enabled_mask &= ~(1 << slot);
326 }
327
328 views->desc.dirty_mask |= 1 << slot;
329 si_update_descriptors(sctx, &views->desc);
330 }
331
332 /* BUFFER RESOURCES */
333
334 static void si_emit_buffer_resources(struct si_context *sctx, struct r600_atom *atom)
335 {
336 struct si_buffer_resources *buffers = (struct si_buffer_resources*)atom;
337
338 si_emit_descriptors(sctx, &buffers->desc, buffers->desc_data);
339 }
340
341 static void si_init_buffer_resources(struct si_context *sctx,
342 struct si_buffer_resources *buffers,
343 unsigned num_buffers, unsigned shader,
344 unsigned shader_userdata_index,
345 enum radeon_bo_usage shader_usage)
346 {
347 int i;
348
349 buffers->num_buffers = num_buffers;
350 buffers->shader_usage = shader_usage;
351 buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
352 buffers->desc_storage = CALLOC(num_buffers, sizeof(uint32_t) * 4);
353
354 /* si_emit_descriptors only accepts an array of arrays.
355 * This adds such an array. */
356 buffers->desc_data = CALLOC(num_buffers, sizeof(uint32_t*));
357 for (i = 0; i < num_buffers; i++) {
358 buffers->desc_data[i] = &buffers->desc_storage[i*4];
359 }
360
361 si_init_descriptors(sctx, &buffers->desc,
362 si_get_shader_user_data_base(shader) +
363 shader_userdata_index*4, 4, num_buffers,
364 si_emit_buffer_resources);
365 }
366
367 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
368 {
369 int i;
370
 371 for (i = 0; i < buffers->num_buffers; i++) {
372 pipe_resource_reference(&buffers->buffers[i], NULL);
373 }
374
375 FREE(buffers->buffers);
376 FREE(buffers->desc_storage);
377 FREE(buffers->desc_data);
378 si_release_descriptors(&buffers->desc);
379 }
380
381 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
382 struct si_buffer_resources *buffers)
383 {
384 unsigned mask = buffers->desc.enabled_mask;
385
386 /* Add relocations to the CS. */
387 while (mask) {
388 int i = u_bit_scan(&mask);
389
390 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
391 (struct r600_resource*)buffers->buffers[i],
392 buffers->shader_usage);
393 }
394
395 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
396 buffers->desc.buffer, RADEON_USAGE_READWRITE);
397
398 si_emit_shader_pointer(sctx, &buffers->desc);
399 }
400
401 /* CONSTANT BUFFERS */
402
403 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
404 const uint8_t *ptr, unsigned size, uint32_t *const_offset)
405 {
406 if (SI_BIG_ENDIAN) {
407 uint32_t *tmpPtr;
408 unsigned i;
409
410 if (!(tmpPtr = malloc(size))) {
411 R600_ERR("Failed to allocate BE swap buffer.\n");
412 return;
413 }
414
415 for (i = 0; i < size / 4; ++i) {
416 tmpPtr[i] = util_bswap32(((uint32_t *)ptr)[i]);
417 }
418
419 u_upload_data(sctx->b.uploader, 0, size, tmpPtr, const_offset,
420 (struct pipe_resource**)rbuffer);
421
422 free(tmpPtr);
423 } else {
424 u_upload_data(sctx->b.uploader, 0, size, ptr, const_offset,
425 (struct pipe_resource**)rbuffer);
426 }
427 }
428
429 static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
430 struct pipe_constant_buffer *input)
431 {
432 struct si_context *sctx = (struct si_context *)ctx;
433 struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
434
435 if (shader >= SI_NUM_SHADERS)
436 return;
437
438 assert(slot < buffers->num_buffers);
439 pipe_resource_reference(&buffers->buffers[slot], NULL);
440
441 /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
442 * with a NULL buffer). We need to use a dummy buffer instead. */
443 if (sctx->b.chip_class == CIK &&
444 (!input || (!input->buffer && !input->user_buffer)))
445 input = &sctx->null_const_buf;
446
447 if (input && (input->buffer || input->user_buffer)) {
448 struct pipe_resource *buffer = NULL;
449 uint64_t va;
450
451 /* Upload the user buffer if needed. */
452 if (input->user_buffer) {
453 unsigned buffer_offset;
454
455 si_upload_const_buffer(sctx,
456 (struct r600_resource**)&buffer, input->user_buffer,
457 input->buffer_size, &buffer_offset);
458 va = r600_resource_va(ctx->screen, buffer) + buffer_offset;
459 } else {
460 pipe_resource_reference(&buffer, input->buffer);
461 va = r600_resource_va(ctx->screen, buffer) + input->buffer_offset;
462 }
463
464 /* Set the descriptor. */
465 uint32_t *desc = buffers->desc_data[slot];
466 desc[0] = va;
467 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
468 S_008F04_STRIDE(0);
469 desc[2] = input->buffer_size;
470 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
471 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
472 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
473 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
474 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
475 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
476
477 buffers->buffers[slot] = buffer;
478 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
479 (struct r600_resource*)buffer, buffers->shader_usage);
480 buffers->desc.enabled_mask |= 1 << slot;
481 } else {
482 /* Clear the descriptor. */
483 memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
484 buffers->desc.enabled_mask &= ~(1 << slot);
485 }
486
487 buffers->desc.dirty_mask |= 1 << slot;
488 si_update_descriptors(sctx, &buffers->desc);
489 }
490
491 /* STREAMOUT BUFFERS */
492
493 static void si_set_streamout_targets(struct pipe_context *ctx,
494 unsigned num_targets,
495 struct pipe_stream_output_target **targets,
496 unsigned append_bitmask)
497 {
498 struct si_context *sctx = (struct si_context *)ctx;
499 struct si_buffer_resources *buffers = &sctx->streamout_buffers;
500 unsigned old_num_targets = sctx->b.streamout.num_targets;
501 unsigned i;
502
503 /* Streamout buffers must be bound in 2 places:
504 * 1) in VGT by setting the VGT_STRMOUT registers
505 * 2) as shader resources
506 */
507
508 /* Set the VGT regs. */
509 r600_set_streamout_targets(ctx, num_targets, targets, append_bitmask);
510
 511 /* Set the shader resources. */
512 for (i = 0; i < num_targets; i++) {
513 if (targets[i]) {
514 struct pipe_resource *buffer = targets[i]->buffer;
515 uint64_t va = r600_resource_va(ctx->screen, buffer);
516
517 /* Set the descriptor. */
518 uint32_t *desc = buffers->desc_data[i];
519 desc[0] = va;
520 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
521 desc[2] = 0xffffffff;
522 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
523 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
524 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
525 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
526
527 /* Set the resource. */
528 pipe_resource_reference(&buffers->buffers[i], buffer);
529 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
530 (struct r600_resource*)buffer,
531 buffers->shader_usage);
532 buffers->desc.enabled_mask |= 1 << i;
533 } else {
534 /* Clear the descriptor and unset the resource. */
535 memset(buffers->desc_data[i], 0, sizeof(uint32_t) * 4);
536 pipe_resource_reference(&buffers->buffers[i], NULL);
537 buffers->desc.enabled_mask &= ~(1 << i);
538 }
539 buffers->desc.dirty_mask |= 1 << i;
540 }
541 for (; i < old_num_targets; i++) {
542 /* Clear the descriptor and unset the resource. */
543 memset(buffers->desc_data[i], 0, sizeof(uint32_t) * 4);
544 pipe_resource_reference(&buffers->buffers[i], NULL);
545 buffers->desc.enabled_mask &= ~(1 << i);
546 buffers->desc.dirty_mask |= 1 << i;
547 }
548
549 si_update_descriptors(sctx, &buffers->desc);
550 }
551
552 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
553 uint32_t *desc, uint64_t old_buf_va,
554 struct pipe_resource *new_buf)
555 {
556 /* Retrieve the buffer offset from the descriptor. */
557 uint64_t old_desc_va =
558 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
559
560 assert(old_buf_va <= old_desc_va);
561 uint64_t offset_within_buffer = old_desc_va - old_buf_va;
562
563 /* Update the descriptor. */
564 uint64_t va = r600_resource_va(ctx->screen, new_buf) + offset_within_buffer;
565
566 desc[0] = va;
567 desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
568 S_008F04_BASE_ADDRESS_HI(va >> 32);
569 }
570
571 /* BUFFER DISCARD/INVALIDATION */
572
 573 /* Reallocate a buffer and update all resource bindings where the buffer is
574 * bound.
575 *
576 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
577 * idle by discarding its contents. Apps usually tell us when to do this using
578 * map_buffer flags, for example.
579 */
580 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
581 {
582 struct si_context *sctx = (struct si_context*)ctx;
583 struct r600_resource *rbuffer = r600_resource(buf);
584 unsigned i, shader, alignment = rbuffer->buf->alignment;
585 uint64_t old_va = r600_resource_va(ctx->screen, buf);
586
587 /* Discard the buffer. */
588 pb_reference(&rbuffer->buf, NULL);
589
590 /* Create a new one in the same pipe_resource. */
591 r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0, alignment,
592 TRUE, rbuffer->b.b.usage);
593
594 /* We changed the buffer, now we need to bind it where the old one
595 * was bound. This consists of 2 things:
596 * 1) Updating the resource descriptor and dirtying it.
597 * 2) Adding a relocation to the CS, so that it's usable.
598 */
599
600 /* Vertex buffers. */
601 /* Nothing to do. Vertex buffer bindings are updated before every draw call. */
602
603 /* Streamout buffers. */
604 for (i = 0; i < sctx->streamout_buffers.num_buffers; i++) {
605 if (sctx->streamout_buffers.buffers[i] == buf) {
606 /* Update the descriptor. */
607 si_desc_reset_buffer_offset(ctx, sctx->streamout_buffers.desc_data[i],
608 old_va, buf);
609
610 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
611 (struct r600_resource*)buf,
612 sctx->streamout_buffers.shader_usage);
613 sctx->streamout_buffers.desc.dirty_mask |= 1 << i;
614 si_update_descriptors(sctx, &sctx->streamout_buffers.desc);
615
616 /* Update the streamout state. */
617 if (sctx->b.streamout.begin_emitted) {
618 r600_emit_streamout_end(&sctx->b);
619 }
620 sctx->b.streamout.append_bitmask = sctx->b.streamout.enabled_mask;
621 r600_streamout_buffers_dirty(&sctx->b);
622 }
623 }
624
625 /* Constant buffers. */
626 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
627 struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
628 bool found = false;
629 uint32_t mask = buffers->desc.enabled_mask;
630
631 while (mask) {
632 unsigned i = u_bit_scan(&mask);
633 if (buffers->buffers[i] == buf) {
634 si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
635 old_va, buf);
636
637 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
638 rbuffer, buffers->shader_usage);
639
640 buffers->desc.dirty_mask |= 1 << i;
641 found = true;
642 }
643 }
644 if (found) {
645 si_update_descriptors(sctx, &buffers->desc);
646 }
647 }
648
649 /* Texture buffers. */
650 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
651 struct si_sampler_views *views = &sctx->samplers[shader].views;
652 bool found = false;
653 uint32_t mask = views->desc.enabled_mask;
654
655 while (mask) {
656 unsigned i = u_bit_scan(&mask);
657 if (views->views[i]->texture == buf) {
658 /* This updates the sampler view directly. */
659 si_desc_reset_buffer_offset(ctx, views->desc_data[i],
660 old_va, buf);
661
662 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
663 rbuffer, RADEON_USAGE_READ);
664
665 views->desc.dirty_mask |= 1 << i;
666 found = true;
667 }
668 }
669 if (found) {
670 si_update_descriptors(sctx, &views->desc);
671 }
672 }
673 }
674
675 /* CP DMA */
676
677 /* The max number of bytes to copy per packet. */
678 #define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
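/* Note: the packet byte count field is bits [20:0] (see the asserts in the
 * CP DMA emit helpers above); the "- 8" presumably keeps every chunk of a
 * large copy 8-byte aligned, so only the final packet can be unaligned. */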
679
680 static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
681 unsigned offset, unsigned size, unsigned value)
682 {
683 struct si_context *sctx = (struct si_context*)ctx;
684
685 if (!size)
686 return;
687
688 /* Mark the buffer range of destination as valid (initialized),
689 * so that transfer_map knows it should wait for the GPU when mapping
690 * that range. */
691 util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
692 offset + size);
693
694 /* Fallback for unaligned clears. */
695 if (offset % 4 != 0 || size % 4 != 0) {
 696 uint32_t *map = (uint32_t*)sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
 697 sctx->b.rings.gfx.cs,
 698 PIPE_TRANSFER_WRITE) + offset / 4;
699 size /= 4;
700 for (unsigned i = 0; i < size; i++)
701 *map++ = value;
702 return;
703 }
704
705 uint64_t va = r600_resource_va(&sctx->screen->b.b, dst) + offset;
706
707 /* Flush the caches where the resource is bound. */
708 /* XXX only flush the caches where the buffer is bound. */
709 sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
710 R600_CONTEXT_INV_CONST_CACHE |
711 R600_CONTEXT_FLUSH_AND_INV_CB |
712 R600_CONTEXT_FLUSH_AND_INV_DB |
713 R600_CONTEXT_FLUSH_AND_INV_CB_META |
714 R600_CONTEXT_FLUSH_AND_INV_DB_META;
715 sctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
716
717 while (size) {
718 unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
719 unsigned dma_flags = 0;
720
721 si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0),
722 FALSE);
723
724 /* This must be done after need_cs_space. */
725 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
726 (struct r600_resource*)dst, RADEON_USAGE_WRITE);
727
728 /* Flush the caches for the first copy only.
729 * Also wait for the previous CP DMA operations. */
730 if (sctx->b.flags) {
731 si_emit_cache_flush(&sctx->b, NULL);
732 dma_flags |= SI_CP_DMA_RAW_WAIT; /* same as WAIT_UNTIL=CP_DMA_IDLE */
733 }
734
735 /* Do the synchronization after the last copy, so that all data is written to memory. */
736 if (size == byte_count)
737 dma_flags |= R600_CP_DMA_SYNC;
738
739 /* Emit the clear packet. */
740 si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value, dma_flags);
741
742 size -= byte_count;
743 va += byte_count;
744 }
745
746 /* Flush the caches again in case the 3D engine has been prefetching
747 * the resource. */
748 /* XXX only flush the caches where the buffer is bound. */
749 sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
750 R600_CONTEXT_INV_CONST_CACHE |
751 R600_CONTEXT_FLUSH_AND_INV_CB |
752 R600_CONTEXT_FLUSH_AND_INV_DB |
753 R600_CONTEXT_FLUSH_AND_INV_CB_META |
754 R600_CONTEXT_FLUSH_AND_INV_DB_META;
755 }
756
757 void si_copy_buffer(struct si_context *sctx,
758 struct pipe_resource *dst, struct pipe_resource *src,
759 uint64_t dst_offset, uint64_t src_offset, unsigned size)
760 {
761 if (!size)
762 return;
763
764 /* Mark the buffer range of destination as valid (initialized),
765 * so that transfer_map knows it should wait for the GPU when mapping
766 * that range. */
767 util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
768 dst_offset + size);
769
770 dst_offset += r600_resource_va(&sctx->screen->b.b, dst);
771 src_offset += r600_resource_va(&sctx->screen->b.b, src);
772
773 /* Flush the caches where the resource is bound. */
774 sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
775 R600_CONTEXT_INV_CONST_CACHE |
776 R600_CONTEXT_FLUSH_AND_INV_CB |
777 R600_CONTEXT_FLUSH_AND_INV_DB |
778 R600_CONTEXT_FLUSH_AND_INV_CB_META |
779 R600_CONTEXT_FLUSH_AND_INV_DB_META |
780 R600_CONTEXT_WAIT_3D_IDLE;
781
782 while (size) {
783 unsigned sync_flags = 0;
784 unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
785
786 si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0), FALSE);
787
788 /* Flush the caches for the first copy only. Also wait for old CP DMA packets to complete. */
789 if (sctx->b.flags) {
790 si_emit_cache_flush(&sctx->b, NULL);
791 sync_flags |= SI_CP_DMA_RAW_WAIT;
792 }
793
794 /* Do the synchronization after the last copy, so that all data is written to memory. */
795 if (size == byte_count) {
796 sync_flags |= R600_CP_DMA_SYNC;
797 }
798
 799 /* This must be done after si_need_cs_space. */
800 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
801 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);
802
803 si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset, byte_count, sync_flags);
804
805 size -= byte_count;
806 src_offset += byte_count;
807 dst_offset += byte_count;
808 }
809
810 sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
811 R600_CONTEXT_INV_CONST_CACHE |
812 R600_CONTEXT_FLUSH_AND_INV_CB |
813 R600_CONTEXT_FLUSH_AND_INV_DB |
814 R600_CONTEXT_FLUSH_AND_INV_CB_META |
815 R600_CONTEXT_FLUSH_AND_INV_DB_META;
816 }
817
818 /* INIT/DEINIT */
819
820 void si_init_all_descriptors(struct si_context *sctx)
821 {
822 int i;
823
824 for (i = 0; i < SI_NUM_SHADERS; i++) {
825 si_init_buffer_resources(sctx, &sctx->const_buffers[i],
826 NUM_CONST_BUFFERS, i, SI_SGPR_CONST,
827 RADEON_USAGE_READ);
828
829 si_init_sampler_views(sctx, &sctx->samplers[i].views, i);
830
831 sctx->atoms.const_buffers[i] = &sctx->const_buffers[i].desc.atom;
832 sctx->atoms.sampler_views[i] = &sctx->samplers[i].views.desc.atom;
833 }
834
835 si_init_buffer_resources(sctx, &sctx->streamout_buffers, 4, PIPE_SHADER_VERTEX,
836 SI_SGPR_SO_BUFFER, RADEON_USAGE_WRITE);
837 sctx->atoms.streamout_buffers = &sctx->streamout_buffers.desc.atom;
838
839 /* Set pipe_context functions. */
840 sctx->b.b.set_constant_buffer = si_set_constant_buffer;
841 sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
842 sctx->b.clear_buffer = si_clear_buffer;
843 sctx->b.invalidate_buffer = si_invalidate_buffer;
844 }
845
846 void si_release_all_descriptors(struct si_context *sctx)
847 {
848 int i;
849
850 for (i = 0; i < SI_NUM_SHADERS; i++) {
851 si_release_buffer_resources(&sctx->const_buffers[i]);
852 si_release_sampler_views(&sctx->samplers[i].views);
853 }
854 si_release_buffer_resources(&sctx->streamout_buffers);
855 }
856
857 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
858 {
859 int i;
860
861 for (i = 0; i < SI_NUM_SHADERS; i++) {
862 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
863 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
864 }
865 si_buffer_resources_begin_new_cs(sctx, &sctx->streamout_buffers);
866 }