/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <marek.olsak@amd.com>
 */

#include "../radeon/r600_cs.h"
#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"

#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#define SI_NUM_CONTEXTS 16

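/* Each si_descriptors buffer holds SI_NUM_CONTEXTS copies of its descriptor
 * array. si_emit_descriptors below rotates through them
 * ((current_context_id + 1) % SI_NUM_CONTEXTS), so a new copy can be written
 * while the GPU may still be reading an older one. */
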
static uint32_t null_desc[8]; /* zeros */

/* Set this if you want the 3D engine to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define R600_CP_DMA_SYNC	(1 << 0) /* R600+ */

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define SI_CP_DMA_RAW_WAIT	(1 << 1) /* SI+ */

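/* Illustrative flag usage, mirroring si_clear_buffer/si_copy_buffer below:
 * when a transfer is split into several packets, SI_CP_DMA_RAW_WAIT is set
 * on the first packet emitted after a cache flush and R600_CP_DMA_SYNC only
 * on the last one:
 *
 *	unsigned flags = 0;
 *	if (flush_was_pending)
 *		flags |= SI_CP_DMA_RAW_WAIT;
 *	if (size == byte_count)      <- last chunk
 *		flags |= R600_CP_DMA_SYNC;
 */
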
/* Emit a CP DMA packet to do a copy from one buffer to another.
 * The size must fit in bits [20:0].
 */
static void si_emit_cp_dma_copy_buffer(struct si_context *sctx,
					uint64_t dst_va, uint64_t src_va,
					unsigned size, unsigned flags)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
	uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;

	assert((size & ((1<<21)-1)) == size);

	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, sync_flag);		/* CP_SYNC [31] */
		radeon_emit(cs, src_va);		/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);		/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, size | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	} else {
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_va);		/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
		radeon_emit(cs, size | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	}
}

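/* For reference: the CIK form above is 7 dwords (PKT3 header + 6 payload)
 * and the SI form 6 dwords (header + 5); the CS-space and atom-size
 * estimates elsewhere in this file use the larger value, 7. */
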
/* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
static void si_emit_cp_dma_clear_buffer(struct si_context *sctx,
					uint64_t dst_va, unsigned size,
					uint32_t clear_value, unsigned flags)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
	uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;

	assert((size & ((1<<21)-1)) == size);

	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL [30:29] */
		radeon_emit(cs, clear_value);		/* DATA [31:0] */
		radeon_emit(cs, 0);			/* unused when SRC_SEL = data */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);		/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, size | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	} else {
		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, clear_value);		/* DATA [31:0] */
		radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL [30:29] */
		radeon_emit(cs, dst_va);		/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
		radeon_emit(cs, size | raw_wait);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
	}
}

static void si_init_descriptors(struct si_context *sctx,
				struct si_descriptors *desc,
				unsigned shader_userdata_reg,
				unsigned element_dw_size,
				unsigned num_elements,
				void (*emit_func)(struct si_context *ctx, struct r600_atom *state))
{
	uint64_t va;

	assert(num_elements <= sizeof(desc->enabled_mask)*8);
	assert(num_elements <= sizeof(desc->dirty_mask)*8);

	desc->atom.emit = (void*)emit_func;
	desc->shader_userdata_reg = shader_userdata_reg;
	desc->element_dw_size = element_dw_size;
	desc->num_elements = num_elements;
	desc->context_size = num_elements * element_dw_size * 4;

	desc->buffer = (struct r600_resource*)
		pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT,
				   SI_NUM_CONTEXTS * desc->context_size);

	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, desc->buffer,
			      RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
	va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);

	/* We don't check for CS space here, because this should be called
	 * only once at context initialization. */
	si_emit_cp_dma_clear_buffer(sctx, va, desc->buffer->b.b.width0, 0,
				    R600_CP_DMA_SYNC);
}

static void si_release_descriptors(struct si_descriptors *desc)
{
	pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
}

static void si_update_descriptors(struct si_context *sctx,
				  struct si_descriptors *desc)
{
	if (desc->dirty_mask) {
		desc->atom.num_dw =
			7 + /* copy */
			(4 + desc->element_dw_size) * util_bitcount(desc->dirty_mask) + /* update */
			4; /* pointer update */

		if (desc->shader_userdata_reg >= R_00B130_SPI_SHADER_USER_DATA_VS_0 &&
		    desc->shader_userdata_reg < R_00B230_SPI_SHADER_USER_DATA_GS_0)
			desc->atom.num_dw += 4; /* second pointer update */

		desc->atom.dirty = true;
		/* The descriptors are read with the K cache. */
		sctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
	} else {
		desc->atom.dirty = false;
	}
}

static void si_emit_shader_pointer(struct si_context *sctx,
				   struct r600_atom *atom)
{
	struct si_descriptors *desc = (struct si_descriptors*)atom;
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint64_t va = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b) +
		      desc->current_context_id * desc->context_size +
		      desc->buffer_offset;

	radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
	radeon_emit(cs, (desc->shader_userdata_reg - SI_SH_REG_OFFSET) >> 2);
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);

	if (desc->shader_userdata_reg >= R_00B130_SPI_SHADER_USER_DATA_VS_0 &&
	    desc->shader_userdata_reg < R_00B230_SPI_SHADER_USER_DATA_GS_0) {
		radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
		radeon_emit(cs, (desc->shader_userdata_reg +
				 (R_00B330_SPI_SHADER_USER_DATA_ES_0 -
				  R_00B130_SPI_SHADER_USER_DATA_VS_0) -
				 SI_SH_REG_OFFSET) >> 2);
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
	}
}

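/* The second SET_SH_REG above mirrors VS user data into the ES slots,
 * presumably because the vertex shader can run as an ES stage when a
 * geometry shader is active, and both register ranges must then point at
 * the same descriptor memory. */
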
static void si_emit_descriptors(struct si_context *sctx,
				struct si_descriptors *desc,
				uint32_t **descriptors)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint64_t va_base;
	int packet_start = 0;
	int packet_size = 0;
	int last_index = desc->num_elements; /* point to a non-existing element */
	unsigned dirty_mask = desc->dirty_mask;
	unsigned new_context_id = (desc->current_context_id + 1) % SI_NUM_CONTEXTS;

	assert(dirty_mask);

	va_base = r600_resource_va(sctx->b.b.screen, &desc->buffer->b.b);

	/* Copy the descriptors to a new context slot. */
	/* XXX Consider using TC or L2 for this copy on CIK. */
	si_emit_cp_dma_copy_buffer(sctx,
				   va_base + new_context_id * desc->context_size,
				   va_base + desc->current_context_id * desc->context_size,
				   desc->context_size, R600_CP_DMA_SYNC);

	va_base += new_context_id * desc->context_size;

	/* Update the descriptors.
	 * Updates of consecutive descriptors are merged to one WRITE_DATA packet.
	 *
	 * XXX When unbinding lots of resources, consider clearing the memory
	 *     with CP DMA instead of emitting zeros.
	 */
	while (dirty_mask) {
		int i = u_bit_scan(&dirty_mask);

		assert(i < desc->num_elements);

		if (last_index+1 == i && packet_size) {
			/* Append new data at the end of the last packet. */
			packet_size += desc->element_dw_size;
			cs->buf[packet_start] = PKT3(PKT3_WRITE_DATA, packet_size, 0);
		} else {
			/* Start a new packet. */
			uint64_t va = va_base + i * desc->element_dw_size * 4;

			packet_start = cs->cdw;
			packet_size = 2 + desc->element_dw_size;

			radeon_emit(cs, PKT3(PKT3_WRITE_DATA, packet_size, 0));
			radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_TC_OR_L2) |
					PKT3_WRITE_DATA_WR_CONFIRM |
					PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
			radeon_emit(cs, va & 0xFFFFFFFFUL);
			radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
		}

		radeon_emit_array(cs, descriptors[i], desc->element_dw_size);

		last_index = i;
	}

	desc->dirty_mask = 0;
	desc->current_context_id = new_context_id;

	/* Now update the shader userdata pointer. */
	si_emit_shader_pointer(sctx, &desc->atom);
}

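/* Illustrative example of the merging above, assuming element_dw_size = 8
 * (a sampler view) and slots 2..4 dirty: slot 2 opens a WRITE_DATA packet
 * (packet_size = 10), slots 3 and 4 only rewrite the packet header
 * (packet_size 10 -> 18 -> 26) and append their 8 dwords each, so all
 * three descriptors are written by a single packet. */
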
static unsigned si_get_shader_user_data_base(unsigned shader)
{
	switch (shader) {
	case PIPE_SHADER_VERTEX:
		return R_00B130_SPI_SHADER_USER_DATA_VS_0;
	case PIPE_SHADER_GEOMETRY:
		return R_00B230_SPI_SHADER_USER_DATA_GS_0;
	case PIPE_SHADER_FRAGMENT:
		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
	default:
		assert(0);
		return 0;
	}
}

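/* User-data SGPRs are consecutive 32-bit registers starting at the per-stage
 * base returned above, so user-data slot N of a stage lives at base + N*4;
 * this is why the si_init_* calls below pass e.g. base + SI_SGPR_RESOURCE*4. */
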
/* SAMPLER VIEWS */

static void si_emit_sampler_views(struct si_context *sctx, struct r600_atom *atom)
{
	struct si_sampler_views *views = (struct si_sampler_views*)atom;

	si_emit_descriptors(sctx, &views->desc, views->desc_data);
}

static void si_init_sampler_views(struct si_context *sctx,
				  struct si_sampler_views *views,
				  unsigned shader)
{
	si_init_descriptors(sctx, &views->desc,
			    si_get_shader_user_data_base(shader) +
			    SI_SGPR_RESOURCE * 4,
			    8, SI_NUM_SAMPLER_VIEWS, si_emit_sampler_views);
}

static void si_release_sampler_views(struct si_sampler_views *views)
{
	int i;

	for (i = 0; i < Elements(views->views); i++) {
		pipe_sampler_view_reference(&views->views[i], NULL);
	}
	si_release_descriptors(&views->desc);
}

static enum radeon_bo_priority si_get_resource_ro_priority(struct r600_resource *res)
{
	if (res->b.b.target == PIPE_BUFFER)
		return RADEON_PRIO_SHADER_BUFFER_RO;

	if (res->b.b.nr_samples > 1)
		return RADEON_PRIO_SHADER_TEXTURE_MSAA;

	return RADEON_PRIO_SHADER_TEXTURE_RO;
}

static void si_sampler_views_begin_new_cs(struct si_context *sctx,
					  struct si_sampler_views *views)
{
	unsigned mask = views->desc.enabled_mask;

	/* Add relocations to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);
		struct si_pipe_sampler_view *rview =
			(struct si_pipe_sampler_view*)views->views[i];

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      rview->resource, RADEON_USAGE_READ,
				      si_get_resource_ro_priority(rview->resource));
	}

	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer,
			      RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);

	si_emit_shader_pointer(sctx, &views->desc.atom);
}

static void si_set_sampler_view(struct si_context *sctx, unsigned shader,
				unsigned slot, struct pipe_sampler_view *view,
				uint32_t *view_desc)
{
	struct si_sampler_views *views = &sctx->samplers[shader].views;

	if (views->views[slot] == view)
		return;

	if (view) {
		struct si_pipe_sampler_view *rview =
			(struct si_pipe_sampler_view*)view;

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      rview->resource, RADEON_USAGE_READ,
				      si_get_resource_ro_priority(rview->resource));

		pipe_sampler_view_reference(&views->views[slot], view);
		views->desc_data[slot] = view_desc;
		views->desc.enabled_mask |= 1 << slot;
	} else {
		pipe_sampler_view_reference(&views->views[slot], NULL);
		views->desc_data[slot] = null_desc;
		views->desc.enabled_mask &= ~(1 << slot);
	}

	views->desc.dirty_mask |= 1 << slot;
}

static void si_set_sampler_views(struct pipe_context *ctx,
				 unsigned shader, unsigned start,
				 unsigned count,
				 struct pipe_sampler_view **views)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_textures_info *samplers = &sctx->samplers[shader];
	struct si_pipe_sampler_view **rviews = (struct si_pipe_sampler_view **)views;
	int i;

	if (!count || shader >= SI_NUM_SHADERS)
		return;

	for (i = 0; i < count; i++) {
		unsigned slot = start + i;

		if (!views[i]) {
			samplers->depth_texture_mask &= ~(1 << slot);
			samplers->compressed_colortex_mask &= ~(1 << slot);
			si_set_sampler_view(sctx, shader, slot, NULL, NULL);
			si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
					    NULL, NULL);
			continue;
		}

		si_set_sampler_view(sctx, shader, slot, views[i], rviews[i]->state);

		if (views[i]->texture->target != PIPE_BUFFER) {
			struct r600_texture *rtex =
				(struct r600_texture*)views[i]->texture;

			if (rtex->is_depth && !rtex->is_flushing_texture) {
				samplers->depth_texture_mask |= 1 << slot;
			} else {
				samplers->depth_texture_mask &= ~(1 << slot);
			}

			if (rtex->cmask.size || rtex->fmask.size) {
				samplers->compressed_colortex_mask |= 1 << slot;
			} else {
				samplers->compressed_colortex_mask &= ~(1 << slot);
			}

			if (rtex->fmask.size) {
				si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
						    views[i], rviews[i]->fmask_state);
			} else {
				si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
						    NULL, NULL);
			}
		}
	}

	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
	si_update_descriptors(sctx, &samplers->views.desc);
}

/* SAMPLER STATES */

static void si_emit_sampler_states(struct si_context *sctx, struct r600_atom *atom)
{
	struct si_sampler_states *states = (struct si_sampler_states*)atom;

	si_emit_descriptors(sctx, &states->desc, states->desc_data);
}

static void si_sampler_states_begin_new_cs(struct si_context *sctx,
					   struct si_sampler_states *states)
{
	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, states->desc.buffer,
			      RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
	si_emit_shader_pointer(sctx, &states->desc.atom);
}

void si_set_sampler_descriptors(struct si_context *sctx, unsigned shader,
				unsigned start, unsigned count, void **states)
{
	struct si_sampler_states *samplers = &sctx->samplers[shader].states;
	struct si_pipe_sampler_state **sstates = (struct si_pipe_sampler_state**)states;
	int i;

	if (start == 0)
		samplers->saved_states[0] = states[0];
	if (start == 1)
		samplers->saved_states[1] = states[0];
	else if (start == 0 && count >= 2)
		samplers->saved_states[1] = states[1];

	for (i = 0; i < count; i++) {
		unsigned slot = start + i;

		if (!sstates[i]) {
			samplers->desc.dirty_mask &= ~(1 << slot);
			continue;
		}

		samplers->desc_data[slot] = sstates[i]->val;
		samplers->desc.dirty_mask |= 1 << slot;
	}

	si_update_descriptors(sctx, &samplers->desc);
}

/* BUFFER RESOURCES */

static void si_emit_buffer_resources(struct si_context *sctx, struct r600_atom *atom)
{
	struct si_buffer_resources *buffers = (struct si_buffer_resources*)atom;

	si_emit_descriptors(sctx, &buffers->desc, buffers->desc_data);
}

static void si_init_buffer_resources(struct si_context *sctx,
				     struct si_buffer_resources *buffers,
				     unsigned num_buffers, unsigned shader,
				     unsigned shader_userdata_index,
				     enum radeon_bo_usage shader_usage,
				     enum radeon_bo_priority priority)
{
	int i;

	buffers->num_buffers = num_buffers;
	buffers->shader_usage = shader_usage;
	buffers->priority = priority;
	buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
	buffers->desc_storage = CALLOC(num_buffers, sizeof(uint32_t) * 4);

	/* si_emit_descriptors only accepts an array of arrays.
	 * This adds such an array. */
	buffers->desc_data = CALLOC(num_buffers, sizeof(uint32_t*));
	for (i = 0; i < num_buffers; i++) {
		buffers->desc_data[i] = &buffers->desc_storage[i*4];
	}

	si_init_descriptors(sctx, &buffers->desc,
			    si_get_shader_user_data_base(shader) +
			    shader_userdata_index*4, 4, num_buffers,
			    si_emit_buffer_resources);
}

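/* Resulting layout of the CALLOCs above, for num_buffers = N:
 *
 *	desc_storage: |slot 0: 4 dwords|slot 1: 4 dwords| ... |slot N-1|
 *	desc_data:    [&desc_storage[0], &desc_storage[4], ...,
 *	               &desc_storage[(N-1)*4]]
 */
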
static void si_release_buffer_resources(struct si_buffer_resources *buffers)
{
	int i;

	for (i = 0; i < buffers->num_buffers; i++) {
		pipe_resource_reference(&buffers->buffers[i], NULL);
	}

	FREE(buffers->buffers);
	FREE(buffers->desc_storage);
	FREE(buffers->desc_data);
	si_release_descriptors(&buffers->desc);
}

static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
					     struct si_buffer_resources *buffers)
{
	unsigned mask = buffers->desc.enabled_mask;

	/* Add relocations to the CS. */
	while (mask) {
		int i = u_bit_scan(&mask);

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource*)buffers->buffers[i],
				      buffers->shader_usage, buffers->priority);
	}

	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
			      buffers->desc.buffer, RADEON_USAGE_READWRITE,
			      RADEON_PRIO_SHADER_DATA);

	si_emit_shader_pointer(sctx, &buffers->desc.atom);
}

/* VERTEX BUFFERS */

static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
{
	struct si_descriptors *desc = &sctx->vertex_buffers;
	int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
	int i;

	for (i = 0; i < count; i++) {
		int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;

		if (vb >= Elements(sctx->vertex_buffer))
			continue;
		if (!sctx->vertex_buffer[vb].buffer)
			continue;

		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
				      RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
	}

	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
			      desc->buffer, RADEON_USAGE_READ,
			      RADEON_PRIO_SHADER_DATA);

	si_emit_shader_pointer(sctx, &desc->atom);
}

void si_update_vertex_buffers(struct si_context *sctx)
{
	struct pipe_context *ctx = &sctx->b.b;
	struct si_descriptors *desc = &sctx->vertex_buffers;
	bool bound[SI_NUM_VERTEX_BUFFERS] = {};
	unsigned i, count;
	uint64_t va;
	uint32_t *ptr;

	if (!sctx->vertex_elements)
		return;

	count = sctx->vertex_elements->count;
	if (!count)
		return;

	/* Vertex buffer descriptors are the only ones which are uploaded
	 * directly through a staging buffer and don't go through
	 * the fine-grained upload path.
	 */
	u_upload_alloc(sctx->b.uploader, 0, count * 16, &desc->buffer_offset,
		       (struct pipe_resource**)&desc->buffer, (void**)&ptr);

	r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
			      desc->buffer, RADEON_USAGE_READ,
			      RADEON_PRIO_SHADER_DATA);

	assert(count <= SI_NUM_VERTEX_BUFFERS);
	assert(desc->current_context_id == 0);

	for (i = 0; i < count; i++) {
		struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
		struct pipe_vertex_buffer *vb;
		struct r600_resource *rbuffer;
		unsigned offset;
		uint32_t *desc = &ptr[i*4];

		if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
			memset(desc, 0, 16);
			continue;
		}

		vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
		rbuffer = (struct r600_resource*)vb->buffer;
		if (rbuffer == NULL) {
			memset(desc, 0, 16);
			continue;
		}

		offset = vb->buffer_offset + ve->src_offset;

		va = r600_resource_va(ctx->screen, (void*)rbuffer);
		va += offset;

		/* Fill in T# buffer resource description */
		desc[0] = va & 0xFFFFFFFF;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(vb->stride);
		if (vb->stride)
			/* Round up by rounding down and adding 1 */
			desc[2] = (vb->buffer->width0 - offset -
				   sctx->vertex_elements->format_size[i]) /
				  vb->stride + 1;
		else
			desc[2] = vb->buffer->width0 - offset;

		desc[3] = sctx->vertex_elements->rsrc_word3[i];

		if (!bound[ve->vertex_buffer_index]) {
			r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
					      (struct r600_resource*)vb->buffer,
					      RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
			bound[ve->vertex_buffer_index] = true;
		}
	}

	desc->atom.num_dw = 8; /* update 2 shader pointers (VS+ES) */
	desc->atom.dirty = true;

	/* Don't flush the const cache. It would have a very negative effect
	 * on performance (confirmed by testing). New descriptors are always
	 * uploaded to a fresh new buffer, so I don't think flushing the const
	 * cache is needed. */
	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
}

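/* Worked example for the T# above, with hypothetical values va = 0x100001000,
 * stride = 16, width0 = 4096, offset = 0x10, format_size[i] = 16:
 *
 *	desc[0] = 0x00001000				(base address, low bits)
 *	desc[1] = BASE_ADDRESS_HI(0x1) | STRIDE(16)
 *	desc[2] = (4096 - 16 - 16) / 16 + 1 = 255	(NUM_RECORDS)
 *	desc[3] = rsrc_word3[i]				(dst_sel + data/num format)
 */
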
/* CONSTANT BUFFERS */

void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
			    const uint8_t *ptr, unsigned size, uint32_t *const_offset)
{
	if (SI_BIG_ENDIAN) {
		uint32_t *tmpPtr;
		unsigned i;

		if (!(tmpPtr = malloc(size))) {
			R600_ERR("Failed to allocate BE swap buffer.\n");
			return;
		}

		for (i = 0; i < size / 4; ++i) {
			tmpPtr[i] = util_cpu_to_le32(((uint32_t *)ptr)[i]);
		}

		u_upload_data(sctx->b.uploader, 0, size, tmpPtr, const_offset,
			      (struct pipe_resource**)rbuffer);

		free(tmpPtr);
	} else {
		u_upload_data(sctx->b.uploader, 0, size, ptr, const_offset,
			      (struct pipe_resource**)rbuffer);
	}
}

static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
				   struct pipe_constant_buffer *input)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_buffer_resources *buffers = &sctx->const_buffers[shader];

	if (shader >= SI_NUM_SHADERS)
		return;

	assert(slot < buffers->num_buffers);
	pipe_resource_reference(&buffers->buffers[slot], NULL);

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
	 * with a NULL buffer). We need to use a dummy buffer instead. */
	if (sctx->b.chip_class == CIK &&
	    (!input || (!input->buffer && !input->user_buffer)))
		input = &sctx->null_const_buf;

	if (input && (input->buffer || input->user_buffer)) {
		struct pipe_resource *buffer = NULL;
		uint64_t va;

		/* Upload the user buffer if needed. */
		if (input->user_buffer) {
			unsigned buffer_offset;

			si_upload_const_buffer(sctx,
					       (struct r600_resource**)&buffer, input->user_buffer,
					       input->buffer_size, &buffer_offset);
			va = r600_resource_va(ctx->screen, buffer) + buffer_offset;
		} else {
			pipe_resource_reference(&buffer, input->buffer);
			va = r600_resource_va(ctx->screen, buffer) + input->buffer_offset;
		}

		/* Set the descriptor. */
		uint32_t *desc = buffers->desc_data[slot];
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(0);
		desc[2] = input->buffer_size;
		desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

		buffers->buffers[slot] = buffer;
		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource*)buffer,
				      buffers->shader_usage, buffers->priority);
		buffers->desc.enabled_mask |= 1 << slot;
	} else {
		/* Clear the descriptor. */
		memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
		buffers->desc.enabled_mask &= ~(1 << slot);
	}

	buffers->desc.dirty_mask |= 1 << slot;
	si_update_descriptors(sctx, &buffers->desc);
}

/* RING BUFFERS */

void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
			struct pipe_constant_buffer *input,
			unsigned stride, unsigned num_records,
			bool add_tid, bool swizzle,
			unsigned element_size, unsigned index_stride)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];

	if (shader >= SI_NUM_SHADERS)
		return;

	/* The stride field in the resource descriptor has 14 bits */
	assert(stride < (1 << 14));

	assert(slot < buffers->num_buffers);
	pipe_resource_reference(&buffers->buffers[slot], NULL);

	if (input && input->buffer) {
		uint64_t va;

		va = r600_resource_va(ctx->screen, input->buffer);

		switch (element_size) {
		default:
			assert(!"Unsupported ring buffer element size");
		case 0:
		case 2:
			element_size = 0;
			break;
		case 4:
			element_size = 1;
			break;
		case 8:
			element_size = 2;
			break;
		case 16:
			element_size = 3;
			break;
		}

		switch (index_stride) {
		default:
			assert(!"Unsupported ring buffer index stride");
		case 0:
		case 8:
			index_stride = 0;
			break;
		case 16:
			index_stride = 1;
			break;
		case 32:
			index_stride = 2;
			break;
		case 64:
			index_stride = 3;
			break;
		}

		/* Set the descriptor. */
		uint32_t *desc = buffers->desc_data[slot];
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(stride) |
			  S_008F04_SWIZZLE_ENABLE(swizzle);
		desc[2] = num_records;
		desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
			  S_008F0C_ELEMENT_SIZE(element_size) |
			  S_008F0C_INDEX_STRIDE(index_stride) |
			  S_008F0C_ADD_TID_ENABLE(add_tid);

		pipe_resource_reference(&buffers->buffers[slot], input->buffer);
		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource*)input->buffer,
				      buffers->shader_usage, buffers->priority);
		buffers->desc.enabled_mask |= 1 << slot;
	} else {
		/* Clear the descriptor. */
		memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
		buffers->desc.enabled_mask &= ~(1 << slot);
	}

	buffers->desc.dirty_mask |= 1 << slot;
	si_update_descriptors(sctx, &buffers->desc);
}

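/* The two switches above pack byte values into the 2-bit hardware encodings
 * used by the resource word: ELEMENT_SIZE 2->0, 4->1, 8->2, 16->3 and
 * INDEX_STRIDE 8->0, 16->1, 32->2, 64->3. */
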
/* STREAMOUT BUFFERS */

static void si_set_streamout_targets(struct pipe_context *ctx,
				     unsigned num_targets,
				     struct pipe_stream_output_target **targets,
				     const unsigned *offsets)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
	unsigned old_num_targets = sctx->b.streamout.num_targets;
	unsigned i, bufidx;

	/* Streamout buffers must be bound in 2 places:
	 * 1) in VGT by setting the VGT_STRMOUT registers
	 * 2) as shader resources
	 */

	/* Set the VGT regs. */
	r600_set_streamout_targets(ctx, num_targets, targets, offsets);

	/* Set the shader resources.*/
	for (i = 0; i < num_targets; i++) {
		bufidx = SI_SO_BUF_OFFSET + i;

		if (targets[i]) {
			struct pipe_resource *buffer = targets[i]->buffer;
			uint64_t va = r600_resource_va(ctx->screen, buffer);

			/* Set the descriptor. */
			uint32_t *desc = buffers->desc_data[bufidx];
			desc[0] = va;
			desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
			desc[2] = 0xffffffff;
			desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
				  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
				  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
				  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

			/* Set the resource. */
			pipe_resource_reference(&buffers->buffers[bufidx],
						buffer);
			r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
					      (struct r600_resource*)buffer,
					      buffers->shader_usage, buffers->priority);
			buffers->desc.enabled_mask |= 1 << bufidx;
		} else {
			/* Clear the descriptor and unset the resource. */
			memset(buffers->desc_data[bufidx], 0,
			       sizeof(uint32_t) * 4);
			pipe_resource_reference(&buffers->buffers[bufidx],
						NULL);
			buffers->desc.enabled_mask &= ~(1 << bufidx);
		}
		buffers->desc.dirty_mask |= 1 << bufidx;
	}
	for (; i < old_num_targets; i++) {
		bufidx = SI_SO_BUF_OFFSET + i;
		/* Clear the descriptor and unset the resource. */
		memset(buffers->desc_data[bufidx], 0, sizeof(uint32_t) * 4);
		pipe_resource_reference(&buffers->buffers[bufidx], NULL);
		buffers->desc.enabled_mask &= ~(1 << bufidx);
		buffers->desc.dirty_mask |= 1 << bufidx;
	}

	si_update_descriptors(sctx, &buffers->desc);
}

static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
					uint32_t *desc, uint64_t old_buf_va,
					struct pipe_resource *new_buf)
{
	/* Retrieve the buffer offset from the descriptor. */
	uint64_t old_desc_va =
		desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);

	assert(old_buf_va <= old_desc_va);
	uint64_t offset_within_buffer = old_desc_va - old_buf_va;

	/* Update the descriptor. */
	uint64_t va = r600_resource_va(ctx->screen, new_buf) + offset_within_buffer;

	desc[0] = va;
	desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
		  S_008F04_BASE_ADDRESS_HI(va >> 32);
}

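/* Example with hypothetical addresses: if the old buffer was at 0x10000 and
 * the descriptor pointed at 0x10400, offset_within_buffer is 0x400; after
 * the buffer moves to 0x20000, the descriptor is rewritten to point at
 * 0x20400, preserving the binding's offset into the buffer. */
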
/* BUFFER DISCARD/INVALIDATION */

/* Reallocate a buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents. Apps usually tell us when to do this using
 * map_buffer flags, for example.
 */
static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(buf);
	unsigned i, shader, alignment = rbuffer->buf->alignment;
	uint64_t old_va = r600_resource_va(ctx->screen, buf);

	/* Reallocate the buffer in the same pipe_resource. */
	r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
			   alignment, TRUE);

	/* We changed the buffer, now we need to bind it where the old one
	 * was bound. This consists of 2 things:
	 *   1) Updating the resource descriptor and dirtying it.
	 *   2) Adding a relocation to the CS, so that it's usable.
	 */

	/* Vertex buffers. */
	/* Nothing to do. Vertex buffer bindings are updated before every draw call. */

	/* Read/Write buffers. */
	for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
		struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
		bool found = false;
		uint32_t mask = buffers->desc.enabled_mask;

		while (mask) {
			i = u_bit_scan(&mask);
			if (buffers->buffers[i] == buf) {
				si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
							    old_va, buf);

				r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
						      rbuffer, buffers->shader_usage,
						      buffers->priority);

				buffers->desc.dirty_mask |= 1 << i;
				found = true;

				if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
					/* Update the streamout state. */
					if (sctx->b.streamout.begin_emitted) {
						r600_emit_streamout_end(&sctx->b);
					}
					sctx->b.streamout.append_bitmask =
						sctx->b.streamout.enabled_mask;
					r600_streamout_buffers_dirty(&sctx->b);
				}
			}
		}
		if (found) {
			si_update_descriptors(sctx, &buffers->desc);
		}
	}

	/* Constant buffers. */
	for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
		struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
		bool found = false;
		uint32_t mask = buffers->desc.enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (buffers->buffers[i] == buf) {
				si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
							    old_va, buf);

				r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
						      rbuffer, buffers->shader_usage,
						      buffers->priority);

				buffers->desc.dirty_mask |= 1 << i;
				found = true;
			}
		}
		if (found) {
			si_update_descriptors(sctx, &buffers->desc);
		}
	}

	/* Texture buffers. */
	for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
		struct si_sampler_views *views = &sctx->samplers[shader].views;
		bool found = false;
		uint32_t mask = views->desc.enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (views->views[i]->texture == buf) {
				/* This updates the sampler view directly. */
				si_desc_reset_buffer_offset(ctx, views->desc_data[i],
							    old_va, buf);

				r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
						      rbuffer, RADEON_USAGE_READ,
						      RADEON_PRIO_SHADER_BUFFER_RO);

				views->desc.dirty_mask |= 1 << i;
				found = true;
			}
		}
		if (found) {
			si_update_descriptors(sctx, &views->desc);
		}
	}
}

/* CP DMA */

/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)

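/* Example: a 5 MiB (5242880 B) clear is split by the loop in si_clear_buffer
 * into ceil(5242880 / 2097144) = 3 packets, and only the last one carries
 * R600_CP_DMA_SYNC. */
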
static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
			    unsigned offset, unsigned size, unsigned value)
{
	struct si_context *sctx = (struct si_context*)ctx;

	if (!size)
		return;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
		       offset + size);

	/* Fallback for unaligned clears. */
	if (offset % 4 != 0 || size % 4 != 0) {
		uint32_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
						       sctx->b.rings.gfx.cs,
						       PIPE_TRANSFER_WRITE);
		map += offset / 4;
		size /= 4;
		for (unsigned i = 0; i < size; i++)
			*map++ = value;
		return;
	}

	uint64_t va = r600_resource_va(&sctx->screen->b.b, dst) + offset;

	/* Flush the caches where the resource is bound. */
	/* XXX only flush the caches where the buffer is bound. */
	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META;
	sctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;

	while (size) {
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
		unsigned dma_flags = 0;

		si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0),
				 FALSE);

		/* This must be done after need_cs_space. */
		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
				      (struct r600_resource*)dst, RADEON_USAGE_WRITE,
				      RADEON_PRIO_MIN);

		/* Flush the caches for the first copy only.
		 * Also wait for the previous CP DMA operations. */
		if (sctx->b.flags) {
			si_emit_cache_flush(&sctx->b, NULL);
			dma_flags |= SI_CP_DMA_RAW_WAIT; /* same as WAIT_UNTIL=CP_DMA_IDLE */
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count)
			dma_flags |= R600_CP_DMA_SYNC;

		/* Emit the clear packet. */
		si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value, dma_flags);

		size -= byte_count;
		va += byte_count;
	}

	/* Flush the caches again in case the 3D engine has been prefetching
	 * the resource. */
	/* XXX only flush the caches where the buffer is bound. */
	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META;
}

void si_copy_buffer(struct si_context *sctx,
		    struct pipe_resource *dst, struct pipe_resource *src,
		    uint64_t dst_offset, uint64_t src_offset, unsigned size)
{
	if (!size)
		return;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += r600_resource_va(&sctx->screen->b.b, dst);
	src_offset += r600_resource_va(&sctx->screen->b.b, src);

	/* Flush the caches where the resource is bound. */
	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META |
			 R600_CONTEXT_WAIT_3D_IDLE;

	while (size) {
		unsigned sync_flags = 0;
		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);

		si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0), FALSE);

		/* Flush the caches for the first copy only. Also wait for old CP DMA packets to complete. */
		if (sctx->b.flags) {
			si_emit_cache_flush(&sctx->b, NULL);
			sync_flags |= SI_CP_DMA_RAW_WAIT;
		}

		/* Do the synchronization after the last copy, so that all data is written to memory. */
		if (size == byte_count) {
			sync_flags |= R600_CP_DMA_SYNC;
		}

		/* This must be done after r600_need_cs_space. */
		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)src,
				      RADEON_USAGE_READ, RADEON_PRIO_MIN);
		r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst,
				      RADEON_USAGE_WRITE, RADEON_PRIO_MIN);

		si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset, byte_count, sync_flags);

		size -= byte_count;
		src_offset += byte_count;
		dst_offset += byte_count;
	}

	sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_INV_CONST_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META;
}

/* INIT/DEINIT */

void si_init_all_descriptors(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_init_buffer_resources(sctx, &sctx->const_buffers[i],
					 SI_NUM_CONST_BUFFERS, i, SI_SGPR_CONST,
					 RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
		si_init_buffer_resources(sctx, &sctx->rw_buffers[i],
					 i == PIPE_SHADER_VERTEX ?
					 SI_NUM_RW_BUFFERS : SI_NUM_RING_BUFFERS,
					 i, SI_SGPR_RW_BUFFERS,
					 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW);

		si_init_sampler_views(sctx, &sctx->samplers[i].views, i);

		si_init_descriptors(sctx, &sctx->samplers[i].states.desc,
				    si_get_shader_user_data_base(i) + SI_SGPR_SAMPLER * 4,
				    4, SI_NUM_SAMPLER_STATES, si_emit_sampler_states);

		sctx->atoms.s.const_buffers[i] = &sctx->const_buffers[i].desc.atom;
		sctx->atoms.s.rw_buffers[i] = &sctx->rw_buffers[i].desc.atom;
		sctx->atoms.s.sampler_views[i] = &sctx->samplers[i].views.desc.atom;
		sctx->atoms.s.sampler_states[i] = &sctx->samplers[i].states.desc.atom;
	}

	si_init_descriptors(sctx, &sctx->vertex_buffers,
			    si_get_shader_user_data_base(PIPE_SHADER_VERTEX) +
			    SI_SGPR_VERTEX_BUFFER*4, 4, SI_NUM_VERTEX_BUFFERS,
			    si_emit_shader_pointer);
	sctx->atoms.s.vertex_buffers = &sctx->vertex_buffers.atom;

	/* Set pipe_context functions. */
	sctx->b.b.set_constant_buffer = si_set_constant_buffer;
	sctx->b.b.set_sampler_views = si_set_sampler_views;
	sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
	sctx->b.clear_buffer = si_clear_buffer;
	sctx->b.invalidate_buffer = si_invalidate_buffer;
}

void si_release_all_descriptors(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_release_buffer_resources(&sctx->const_buffers[i]);
		si_release_buffer_resources(&sctx->rw_buffers[i]);
		si_release_sampler_views(&sctx->samplers[i].views);
		si_release_descriptors(&sctx->samplers[i].states.desc);
	}
	si_release_descriptors(&sctx->vertex_buffers);
}

void si_all_descriptors_begin_new_cs(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
		si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
		si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
		si_sampler_states_begin_new_cs(sctx, &sctx->samplers[i].states);
	}
	si_vertex_buffers_begin_new_cs(sctx);
}