winsys/radeon: fold cs_set_flush_callback into cs_create
[mesa.git] src/gallium/drivers/radeonsi/si_pipe.c
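This commit passes the GFX flush callback to cs_create() at creation time instead of registering it afterwards through the now-removed cs_set_flush_callback(). A minimal before/after sketch of the call site (the "before" shape is an assumption based on the commit title, not taken from the removed winsys code):

    /* before (assumed shape of the removed API): register the callback
     * in a second call after creating the CS */
    cs = ws->cs_create(ws, RING_GFX, NULL);
    ws->cs_set_flush_callback(cs, si_flush_gfx_ring, sctx);

    /* after: the callback and its user data are passed to cs_create()
     * directly, as done in si_create_context() below */
    cs = ws->cs_create(ws, RING_GFX, si_flush_gfx_ring, sctx, NULL);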
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_public.h"

#include "radeon/radeon_uvd.h"
#include "util/u_blitter.h"
#include "util/u_memory.h"
#include "util/u_simple_shaders.h"
#include "vl/vl_decoder.h"

/*
 * pipe_context
 */
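/* Flush the GFX command stream.  If "fence" is non-NULL, a fence for the
 * current CS is returned through it.  Any active render condition is
 * disabled for the duration of the flush and restored afterwards. */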
void si_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
              unsigned flags)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct pipe_query *render_cond = NULL;
        boolean render_cond_cond = FALSE;
        unsigned render_cond_mode = 0;

        if (fence) {
                *fence = sctx->b.ws->cs_create_fence(sctx->b.rings.gfx.cs);
        }

        /* Disable render condition. */
        if (sctx->b.current_render_cond) {
                render_cond = sctx->b.current_render_cond;
                render_cond_cond = sctx->b.current_render_cond_cond;
                render_cond_mode = sctx->b.current_render_cond_mode;
                ctx->render_condition(ctx, NULL, FALSE, 0);
        }

        si_context_flush(sctx, flags);

        /* Re-enable render condition. */
        if (render_cond) {
                ctx->render_condition(ctx, render_cond, render_cond_cond, render_cond_mode);
        }
}

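/* pipe_context::flush entry point used by the state tracker: translate
 * PIPE_FLUSH_* flags to RADEON_FLUSH_* flags, flush the DMA ring first if
 * one exists, then flush the GFX ring. */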
static void si_flush_from_st(struct pipe_context *ctx,
                             struct pipe_fence_handle **fence,
                             unsigned flags)
{
        struct si_context *sctx = (struct si_context *)ctx;
        unsigned rflags = 0;

        if (flags & PIPE_FLUSH_END_OF_FRAME)
                rflags |= RADEON_FLUSH_END_OF_FRAME;

        if (sctx->b.rings.dma.cs) {
                sctx->b.rings.dma.flush(sctx, rflags);
        }

        si_flush(ctx, fence, rflags);
}

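/* Flush callback handed to the winsys when the CS is created (see the
 * cs_create call in si_create_context); it flushes the GFX ring without
 * returning a fence. */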
static void si_flush_gfx_ring(void *ctx, unsigned flags)
{
        si_flush((struct pipe_context*)ctx, NULL, flags);
}

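/* Destroy a context: release descriptors, the internal blend/DSA states
 * used by the blitter and decompression passes, the blitter itself, PM4
 * state, and finally the common context. */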
static void si_destroy_context(struct pipe_context *context)
{
        struct si_context *sctx = (struct si_context *)context;

        si_release_all_descriptors(sctx);

        pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
        r600_resource_reference(&sctx->border_color_table, NULL);

        si_pm4_delete_state(sctx, gs_rings, sctx->gs_rings);
        si_pm4_delete_state(sctx, gs_onoff, sctx->gs_on);
        si_pm4_delete_state(sctx, gs_onoff, sctx->gs_off);

        if (sctx->dummy_pixel_shader) {
                sctx->b.b.delete_fs_state(&sctx->b.b, sctx->dummy_pixel_shader);
        }
        for (int i = 0; i < 8; i++) {
                sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_depth_stencil[i]);
                sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_depth[i]);
                sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_stencil[i]);
        }
        sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_inplace);
        sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_resolve);
        sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_decompress);
        sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_fastclear);
        util_unreference_framebuffer_state(&sctx->framebuffer.state);

        util_blitter_destroy(sctx->blitter);

        si_pm4_cleanup(sctx);

        r600_common_context_cleanup(&sctx->b);
        FREE(sctx);
}

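/* Create a radeonsi context: set up pipe_context function pointers, the
 * common context, the GFX command stream with its flush callback, default
 * hardware state, the blitter, and (on CIK) a dummy constant buffer that
 * stands in for NULL constant buffer bindings. */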
static struct pipe_context *si_create_context(struct pipe_screen *screen, void *priv)
{
        struct si_context *sctx = CALLOC_STRUCT(si_context);
        struct si_screen* sscreen = (struct si_screen *)screen;
        struct radeon_winsys *ws = sscreen->b.ws;
        int shader, i;

        if (sctx == NULL)
                return NULL;

        sctx->b.b.screen = screen; /* this must be set first */
        sctx->b.b.priv = priv;
        sctx->b.b.destroy = si_destroy_context;
        sctx->b.b.flush = si_flush_from_st;
        sctx->screen = sscreen; /* Easy accessing of screen/winsys. */

        if (!r600_common_context_init(&sctx->b, &sscreen->b))
                goto fail;

        si_init_blit_functions(sctx);
        si_init_compute_functions(sctx);

        if (sscreen->b.info.has_uvd) {
                sctx->b.b.create_video_codec = si_uvd_create_decoder;
                sctx->b.b.create_video_buffer = si_video_buffer_create;
        } else {
                sctx->b.b.create_video_codec = vl_create_decoder;
                sctx->b.b.create_video_buffer = vl_video_buffer_create;
        }

        sctx->b.rings.gfx.cs = ws->cs_create(ws, RING_GFX, si_flush_gfx_ring,
                                             sctx, NULL);
        sctx->b.rings.gfx.flush = si_flush_gfx_ring;

        si_init_all_descriptors(sctx);

        /* Initialize cache_flush. */
        sctx->cache_flush = si_atom_cache_flush;
        sctx->atoms.cache_flush = &sctx->cache_flush;

        sctx->atoms.streamout_begin = &sctx->b.streamout.begin_atom;
        sctx->atoms.streamout_enable = &sctx->b.streamout.enable_atom;

        switch (sctx->b.chip_class) {
        case SI:
        case CIK:
                si_init_state_functions(sctx);
                si_init_config(sctx);
                break;
        default:
                R600_ERR("Unsupported chip class %d.\n", sctx->b.chip_class);
                goto fail;
        }

        sctx->blitter = util_blitter_create(&sctx->b.b);
        if (sctx->blitter == NULL)
                goto fail;

        sctx->dummy_pixel_shader =
                util_make_fragment_cloneinput_shader(&sctx->b.b, 0,
                                                     TGSI_SEMANTIC_GENERIC,
                                                     TGSI_INTERPOLATE_CONSTANT);
        sctx->b.b.bind_fs_state(&sctx->b.b, sctx->dummy_pixel_shader);

        /* these must be last */
        si_begin_new_cs(sctx);
        r600_query_init_backend_mask(&sctx->b); /* this emits commands and must be last */

        /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
         * with a NULL buffer). We need to use a dummy buffer instead. */
        if (sctx->b.chip_class == CIK) {
                sctx->null_const_buf.buffer = pipe_buffer_create(screen, PIPE_BIND_CONSTANT_BUFFER,
                                                                 PIPE_USAGE_DEFAULT, 16);
                sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

                for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
                        for (i = 0; i < NUM_CONST_BUFFERS; i++) {
                                sctx->b.b.set_constant_buffer(&sctx->b.b, shader, i,
                                                              &sctx->null_const_buf);
                        }
                }

                /* Clear the NULL constant buffer, because loads should return zeros. */
                sctx->b.clear_buffer(&sctx->b.b, sctx->null_const_buf.buffer, 0,
                                     sctx->null_const_buf.buffer->width0, 0);
        }

        return &sctx->b.b;
fail:
        si_destroy_context(&sctx->b.b);
        return NULL;
}

/*
 * pipe_screen
 */

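/* Report screen capabilities (PIPE_CAP_*) to the state tracker. */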
static int si_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
{
        struct si_screen *sscreen = (struct si_screen *)pscreen;

        switch (param) {
        /* Supported features (boolean caps). */
        case PIPE_CAP_TWO_SIDED_STENCIL:
        case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
        case PIPE_CAP_ANISOTROPIC_FILTER:
        case PIPE_CAP_POINT_SPRITE:
        case PIPE_CAP_OCCLUSION_QUERY:
        case PIPE_CAP_TEXTURE_SHADOW_MAP:
        case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
        case PIPE_CAP_BLEND_EQUATION_SEPARATE:
        case PIPE_CAP_TEXTURE_SWIZZLE:
        case PIPE_CAP_DEPTH_CLIP_DISABLE:
        case PIPE_CAP_SHADER_STENCIL_EXPORT:
        case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
        case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
        case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
        case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
        case PIPE_CAP_SM3:
        case PIPE_CAP_SEAMLESS_CUBE_MAP:
        case PIPE_CAP_PRIMITIVE_RESTART:
        case PIPE_CAP_CONDITIONAL_RENDER:
        case PIPE_CAP_TEXTURE_BARRIER:
        case PIPE_CAP_INDEP_BLEND_ENABLE:
        case PIPE_CAP_INDEP_BLEND_FUNC:
        case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
        case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
        case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
        case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
        case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
        case PIPE_CAP_USER_INDEX_BUFFERS:
        case PIPE_CAP_USER_CONSTANT_BUFFERS:
        case PIPE_CAP_START_INSTANCE:
        case PIPE_CAP_NPOT_TEXTURES:
        case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
        case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
        case PIPE_CAP_TGSI_INSTANCEID:
        case PIPE_CAP_COMPUTE:
        case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
        case PIPE_CAP_TGSI_VS_LAYER:
        case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
        case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
                return 1;

        case PIPE_CAP_TEXTURE_MULTISAMPLE:
                /* 2D tiling on CIK is supported since DRM 2.35.0 */
                return HAVE_LLVM >= 0x0304 && (sscreen->b.chip_class < CIK ||
                                               sscreen->b.info.drm_minor >= 35);

        case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
                return R600_MAP_BUFFER_ALIGNMENT;

        case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
        case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
                return 4;

        case PIPE_CAP_GLSL_FEATURE_LEVEL:
                return HAVE_LLVM >= 0x0305 ? 330 : 140;

        case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
                return MIN2(sscreen->b.info.vram_size, 0xFFFFFFFF);

        /* Unsupported features. */
        case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
        case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
        case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
        case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
        case PIPE_CAP_VERTEX_COLOR_CLAMPED:
        case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
        case PIPE_CAP_USER_VERTEX_BUFFERS:
        case PIPE_CAP_CUBE_MAP_ARRAY:
        case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
        case PIPE_CAP_TEXTURE_GATHER_SM5:
        case PIPE_CAP_TGSI_TEXCOORD:
        case PIPE_CAP_FAKE_SW_MSAA:
        case PIPE_CAP_TEXTURE_QUERY_LOD:
                return 0;

        case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
                return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600;

        /* Stream output. */
        case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
                return sscreen->b.has_streamout ? 4 : 0;
        case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
                return sscreen->b.has_streamout ? 1 : 0;
        case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
        case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
                return sscreen->b.has_streamout ? 32*4 : 0;

        /* Geometry shader output. */
        case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
                return 1024;
        case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
                return 4095;

        /* Texturing. */
        case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
        case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
                return 15; /* 16384 */
        case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
                /* textures support 8192, but layered rendering supports 2048 */
                return 12;
        case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
                /* textures support 8192, but layered rendering supports 2048 */
                return 2048;

        /* Render targets. */
        case PIPE_CAP_MAX_RENDER_TARGETS:
                return 8;

        case PIPE_CAP_MAX_VIEWPORTS:
                return 1;

        /* Timer queries, present when the clock frequency is non zero. */
        case PIPE_CAP_QUERY_TIMESTAMP:
        case PIPE_CAP_QUERY_TIME_ELAPSED:
                return sscreen->b.info.r600_clock_crystal_freq != 0;

        case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
        case PIPE_CAP_MIN_TEXEL_OFFSET:
                return -8;

        case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
        case PIPE_CAP_MAX_TEXEL_OFFSET:
                return 7;
        case PIPE_CAP_ENDIANNESS:
                return PIPE_ENDIAN_LITTLE;
        }
        return 0;
}

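/* Report per-stage shader capabilities (PIPE_SHADER_CAP_*).  Geometry
 * shaders require LLVM 3.5, compute only reports its preferred IR, and
 * tessellation is not supported yet. */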
static int si_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
{
        switch(shader)
        {
        case PIPE_SHADER_FRAGMENT:
        case PIPE_SHADER_VERTEX:
                break;
        case PIPE_SHADER_GEOMETRY:
#if HAVE_LLVM < 0x0305
                return 0;
#endif
                break;
        case PIPE_SHADER_COMPUTE:
                switch (param) {
                case PIPE_SHADER_CAP_PREFERRED_IR:
                        return PIPE_SHADER_IR_LLVM;
                default:
                        return 0;
                }
        default:
                /* TODO: support tessellation */
                return 0;
        }

        switch (param) {
        case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
                return 16384;
        case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
                return 32;
        case PIPE_SHADER_CAP_MAX_INPUTS:
                return 32;
        case PIPE_SHADER_CAP_MAX_TEMPS:
                return 256; /* Max native temporaries. */
        case PIPE_SHADER_CAP_MAX_ADDRS:
                /* FIXME Isn't this equal to TEMPS? */
                return 1; /* Max native address registers */
        case PIPE_SHADER_CAP_MAX_CONSTS:
                return 4096; /* actually only memory limits this */
        case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
                return NUM_PIPE_CONST_BUFFERS;
        case PIPE_SHADER_CAP_MAX_PREDS:
                return 0; /* FIXME */
        case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
                return 1;
        case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
                return 0;
        case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
                /* Indirection of geometry shader input dimension is not
                 * handled yet
                 */
                return shader < PIPE_SHADER_GEOMETRY;
        case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
        case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
        case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
                return 1;
        case PIPE_SHADER_CAP_INTEGERS:
                return 1;
        case PIPE_SHADER_CAP_SUBROUTINES:
                return 0;
        case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
        case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
                return 16;
        case PIPE_SHADER_CAP_PREFERRED_IR:
                return PIPE_SHADER_IR_TGSI;
        }
        return 0;
}

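/* Destroy the screen; the common screen (and the winsys) is only torn
 * down once the last winsys reference is dropped. */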
static void si_destroy_screen(struct pipe_screen* pscreen)
{
        struct si_screen *sscreen = (struct si_screen *)pscreen;

        if (sscreen == NULL)
                return;

        if (!sscreen->b.ws->unref(sscreen->b.ws))
                return;

        r600_destroy_common_screen(&sscreen->b);
}

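/* Create the radeonsi screen: set pipe_screen function pointers, initialize
 * the common screen, enable CP DMA and streamout (LLVM >= 3.4), and create
 * the auxiliary context used for screen-internal operations. */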
struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
{
        struct si_screen *sscreen = CALLOC_STRUCT(si_screen);
        if (sscreen == NULL) {
                return NULL;
        }

        /* Set functions first. */
        sscreen->b.b.context_create = si_create_context;
        sscreen->b.b.destroy = si_destroy_screen;
        sscreen->b.b.get_param = si_get_param;
        sscreen->b.b.get_shader_param = si_get_shader_param;
        sscreen->b.b.is_format_supported = si_is_format_supported;
        sscreen->b.b.resource_create = r600_resource_create_common;

        if (!r600_common_screen_init(&sscreen->b, ws)) {
                FREE(sscreen);
                return NULL;
        }

        sscreen->b.has_cp_dma = true;
        sscreen->b.has_streamout = HAVE_LLVM >= 0x0304;

        if (debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE))
                sscreen->b.debug_flags |= DBG_FS | DBG_VS | DBG_GS | DBG_PS | DBG_CS;

        /* Create the auxiliary context. This must be done last. */
        sscreen->b.aux_context = sscreen->b.b.context_create(&sscreen->b.b, NULL);

        return &sscreen->b.b;
}