mesa/soft/llvmpipe: add fake MSAA support
[mesa.git] src/gallium/drivers/radeonsi/si_pipe.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_public.h"

#include "radeon/radeon_uvd.h"
#include "util/u_blitter.h"
#include "util/u_memory.h"
#include "util/u_simple_shaders.h"
#include "vl/vl_decoder.h"

/*
 * pipe_context
 */
void si_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
              unsigned flags)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct pipe_query *render_cond = NULL;
        boolean render_cond_cond = FALSE;
        unsigned render_cond_mode = 0;

        if (fence) {
                *fence = sctx->b.ws->cs_create_fence(sctx->b.rings.gfx.cs);
        }

        /* Disable render condition. */
        if (sctx->b.current_render_cond) {
                render_cond = sctx->b.current_render_cond;
                render_cond_cond = sctx->b.current_render_cond_cond;
                render_cond_mode = sctx->b.current_render_cond_mode;
                ctx->render_condition(ctx, NULL, FALSE, 0);
        }

        si_context_flush(sctx, flags);

        /* Re-enable render condition. */
        if (render_cond) {
                ctx->render_condition(ctx, render_cond, render_cond_cond, render_cond_mode);
        }
}

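/* pipe_context::flush entry point used by the state tracker: flush the DMA
 * ring first (if it exists), then the GFX ring, forwarding the end-of-frame
 * hint in both cases. */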
static void si_flush_from_st(struct pipe_context *ctx,
                             struct pipe_fence_handle **fence,
                             unsigned flags)
{
        struct si_context *sctx = (struct si_context *)ctx;

        if (sctx->b.rings.dma.cs) {
                sctx->b.rings.dma.flush(sctx,
                                        flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0);
        }

        si_flush(ctx, fence,
                 flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0);
}

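/* Flush callback registered with the winsys for the GFX ring
 * (see cs_set_flush_callback in si_create_context). */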
static void si_flush_from_winsys(void *ctx, unsigned flags)
{
        si_flush((struct pipe_context*)ctx, NULL, flags);
}

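/* Destroy the context: release descriptors, cached PM4 states, the built-in
 * DSA/blend states and the blitter, then tear down the common context. */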
static void si_destroy_context(struct pipe_context *context)
{
        struct si_context *sctx = (struct si_context *)context;

        si_release_all_descriptors(sctx);

        pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
        r600_resource_reference(&sctx->border_color_table, NULL);

        si_pm4_delete_state(sctx, gs_rings, sctx->gs_rings);
        si_pm4_delete_state(sctx, gs_onoff, sctx->gs_on);
        si_pm4_delete_state(sctx, gs_onoff, sctx->gs_off);

        if (sctx->dummy_pixel_shader) {
                sctx->b.b.delete_fs_state(&sctx->b.b, sctx->dummy_pixel_shader);
        }
        for (int i = 0; i < 8; i++) {
                sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_depth_stencil[i]);
                sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_depth[i]);
                sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_stencil[i]);
        }
        sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_inplace);
        sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_resolve);
        sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_decompress);
        sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_fastclear);
        util_unreference_framebuffer_state(&sctx->framebuffer.state);

        util_blitter_destroy(sctx->blitter);

        si_pm4_cleanup(sctx);

        r600_common_context_cleanup(&sctx->b);
        FREE(sctx);
}

static struct pipe_context *si_create_context(struct pipe_screen *screen, void *priv)
{
        struct si_context *sctx = CALLOC_STRUCT(si_context);
        struct si_screen* sscreen = (struct si_screen *)screen;
        int shader, i;

        if (sctx == NULL)
                return NULL;

        sctx->b.b.screen = screen; /* this must be set first */
        sctx->b.b.priv = priv;
        sctx->b.b.destroy = si_destroy_context;
        sctx->b.b.flush = si_flush_from_st;
        sctx->screen = sscreen; /* Easy accessing of screen/winsys. */

        if (!r600_common_context_init(&sctx->b, &sscreen->b))
                goto fail;

        si_init_blit_functions(sctx);
        si_init_compute_functions(sctx);

        if (sscreen->b.info.has_uvd) {
                sctx->b.b.create_video_codec = si_uvd_create_decoder;
                sctx->b.b.create_video_buffer = si_video_buffer_create;
        } else {
                sctx->b.b.create_video_codec = vl_create_decoder;
                sctx->b.b.create_video_buffer = vl_video_buffer_create;
        }

        sctx->b.rings.gfx.cs = sctx->b.ws->cs_create(sctx->b.ws, RING_GFX, NULL);
        sctx->b.rings.gfx.flush = si_flush_from_winsys;

        si_init_all_descriptors(sctx);

        /* Initialize cache_flush. */
        sctx->cache_flush = si_atom_cache_flush;
        sctx->atoms.cache_flush = &sctx->cache_flush;

        sctx->atoms.streamout_begin = &sctx->b.streamout.begin_atom;
        sctx->atoms.streamout_enable = &sctx->b.streamout.enable_atom;

        switch (sctx->b.chip_class) {
        case SI:
        case CIK:
                si_init_state_functions(sctx);
                si_init_config(sctx);
                break;
        default:
                R600_ERR("Unsupported chip class %d.\n", sctx->b.chip_class);
                goto fail;
        }

        sctx->b.ws->cs_set_flush_callback(sctx->b.rings.gfx.cs, si_flush_from_winsys, sctx);

        sctx->blitter = util_blitter_create(&sctx->b.b);
        if (sctx->blitter == NULL)
                goto fail;

        sctx->dummy_pixel_shader =
                util_make_fragment_cloneinput_shader(&sctx->b.b, 0,
                                                     TGSI_SEMANTIC_GENERIC,
                                                     TGSI_INTERPOLATE_CONSTANT);
        sctx->b.b.bind_fs_state(&sctx->b.b, sctx->dummy_pixel_shader);

        /* these must be last */
        si_begin_new_cs(sctx);
        r600_query_init_backend_mask(&sctx->b); /* this emits commands and must be last */

        /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
         * with a NULL buffer). We need to use a dummy buffer instead. */
        if (sctx->b.chip_class == CIK) {
                sctx->null_const_buf.buffer = pipe_buffer_create(screen, PIPE_BIND_CONSTANT_BUFFER,
                                                                 PIPE_USAGE_DEFAULT, 16);
                sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

                for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
                        for (i = 0; i < NUM_CONST_BUFFERS; i++) {
                                sctx->b.b.set_constant_buffer(&sctx->b.b, shader, i,
                                                              &sctx->null_const_buf);
                        }
                }

                /* Clear the NULL constant buffer, because loads should return zeros. */
                sctx->b.clear_buffer(&sctx->b.b, sctx->null_const_buf.buffer, 0,
                                     sctx->null_const_buf.buffer->width0, 0);
        }

        return &sctx->b.b;
fail:
        si_destroy_context(&sctx->b.b);
        return NULL;
}

/*
 * pipe_screen
 */

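/* Report screen-wide capabilities (pipe_cap values) to the state tracker. */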
static int si_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
{
        struct si_screen *sscreen = (struct si_screen *)pscreen;

        switch (param) {
        /* Supported features (boolean caps). */
        case PIPE_CAP_TWO_SIDED_STENCIL:
        case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
        case PIPE_CAP_ANISOTROPIC_FILTER:
        case PIPE_CAP_POINT_SPRITE:
        case PIPE_CAP_OCCLUSION_QUERY:
        case PIPE_CAP_TEXTURE_SHADOW_MAP:
        case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
        case PIPE_CAP_BLEND_EQUATION_SEPARATE:
        case PIPE_CAP_TEXTURE_SWIZZLE:
        case PIPE_CAP_DEPTH_CLIP_DISABLE:
        case PIPE_CAP_SHADER_STENCIL_EXPORT:
        case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
        case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
        case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
        case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
        case PIPE_CAP_SM3:
        case PIPE_CAP_SEAMLESS_CUBE_MAP:
        case PIPE_CAP_PRIMITIVE_RESTART:
        case PIPE_CAP_CONDITIONAL_RENDER:
        case PIPE_CAP_TEXTURE_BARRIER:
        case PIPE_CAP_INDEP_BLEND_ENABLE:
        case PIPE_CAP_INDEP_BLEND_FUNC:
        case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
        case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
        case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
        case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
        case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
        case PIPE_CAP_USER_INDEX_BUFFERS:
        case PIPE_CAP_USER_CONSTANT_BUFFERS:
        case PIPE_CAP_START_INSTANCE:
        case PIPE_CAP_NPOT_TEXTURES:
        case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
        case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
        case PIPE_CAP_TGSI_INSTANCEID:
        case PIPE_CAP_COMPUTE:
        case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
        case PIPE_CAP_TGSI_VS_LAYER:
        case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
        case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
                return 1;

        case PIPE_CAP_TEXTURE_MULTISAMPLE:
                /* 2D tiling on CIK is supported since DRM 2.35.0 */
                return HAVE_LLVM >= 0x0304 && (sscreen->b.chip_class < CIK ||
                                               sscreen->b.info.drm_minor >= 35);

        case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
                return R600_MAP_BUFFER_ALIGNMENT;

        case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
        case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
                return 4;

        case PIPE_CAP_GLSL_FEATURE_LEVEL:
                return HAVE_LLVM >= 0x0305 ? 330 : 140;

        case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
                return MIN2(sscreen->b.info.vram_size, 0xFFFFFFFF);

        /* Unsupported features. */
        case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
        case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
        case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
        case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
        case PIPE_CAP_VERTEX_COLOR_CLAMPED:
        case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
        case PIPE_CAP_USER_VERTEX_BUFFERS:
        case PIPE_CAP_CUBE_MAP_ARRAY:
        case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
        case PIPE_CAP_TEXTURE_GATHER_SM5:
        case PIPE_CAP_TGSI_TEXCOORD:
        case PIPE_CAP_FAKE_SW_MSAA:
                return 0;

        case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
                return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600;

        /* Stream output. */
        case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
                return sscreen->b.has_streamout ? 4 : 0;
        case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
                return sscreen->b.has_streamout ? 1 : 0;
        case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
        case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
                return sscreen->b.has_streamout ? 32*4 : 0;

        /* Geometry shader output. */
        case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
                return 1024;
        case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
                return 4095;

        /* Texturing. */
        case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
        case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
                return 15; /* 16384 */
        case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
                /* textures support 8192, but layered rendering supports 2048 */
                return 12;
        case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
                /* textures support 8192, but layered rendering supports 2048 */
                return 2048;

        /* Render targets. */
        case PIPE_CAP_MAX_RENDER_TARGETS:
                return 8;

        case PIPE_CAP_MAX_VIEWPORTS:
                return 1;

        /* Timer queries, present when the clock frequency is non zero. */
        case PIPE_CAP_QUERY_TIMESTAMP:
        case PIPE_CAP_QUERY_TIME_ELAPSED:
                return sscreen->b.info.r600_clock_crystal_freq != 0;

        case PIPE_CAP_MIN_TEXEL_OFFSET:
                return -8;

        case PIPE_CAP_MAX_TEXEL_OFFSET:
                return 7;
        case PIPE_CAP_ENDIANNESS:
                return PIPE_ENDIAN_LITTLE;
        }
        return 0;
}

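/* Per-stage shader capabilities: vertex and fragment shaders are fully
 * supported, geometry shaders require LLVM 3.5, and compute only reports
 * its preferred IR (LLVM) here. */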
static int si_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
{
        switch(shader)
        {
        case PIPE_SHADER_FRAGMENT:
        case PIPE_SHADER_VERTEX:
                break;
        case PIPE_SHADER_GEOMETRY:
#if HAVE_LLVM < 0x0305
                return 0;
#endif
                break;
        case PIPE_SHADER_COMPUTE:
                switch (param) {
                case PIPE_SHADER_CAP_PREFERRED_IR:
                        return PIPE_SHADER_IR_LLVM;
                default:
                        return 0;
                }
        default:
                /* TODO: support tessellation */
                return 0;
        }

        switch (param) {
        case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
                return 16384;
        case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
                return 32;
        case PIPE_SHADER_CAP_MAX_INPUTS:
                return 32;
        case PIPE_SHADER_CAP_MAX_TEMPS:
                return 256; /* Max native temporaries. */
        case PIPE_SHADER_CAP_MAX_ADDRS:
                /* FIXME Isn't this equal to TEMPS? */
                return 1; /* Max native address registers */
        case PIPE_SHADER_CAP_MAX_CONSTS:
                return 4096; /* actually only memory limits this */
        case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
                return NUM_PIPE_CONST_BUFFERS;
        case PIPE_SHADER_CAP_MAX_PREDS:
                return 0; /* FIXME */
        case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
                return 1;
        case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
                return 0;
        case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
                /* Indirection of geometry shader input dimension is not
                 * handled yet
                 */
                return shader < PIPE_SHADER_GEOMETRY;
        case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
        case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
        case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
                return 1;
        case PIPE_SHADER_CAP_INTEGERS:
                return 1;
        case PIPE_SHADER_CAP_SUBROUTINES:
                return 0;
        case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
        case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
                return 16;
        case PIPE_SHADER_CAP_PREFERRED_IR:
                return PIPE_SHADER_IR_TGSI;
        }
        return 0;
}

static void si_destroy_screen(struct pipe_screen* pscreen)
{
        struct si_screen *sscreen = (struct si_screen *)pscreen;

        if (sscreen == NULL)
                return;

        if (!radeon_winsys_unref(sscreen->b.ws))
                return;

        r600_destroy_common_screen(&sscreen->b);
}

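/* Public entry point: create the radeonsi screen on top of the given winsys.
 * This also creates the auxiliary context used internally by the common
 * radeon code. */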
struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
{
        struct si_screen *sscreen = CALLOC_STRUCT(si_screen);
        if (sscreen == NULL) {
                return NULL;
        }

        /* Set functions first. */
        sscreen->b.b.context_create = si_create_context;
        sscreen->b.b.destroy = si_destroy_screen;
        sscreen->b.b.get_param = si_get_param;
        sscreen->b.b.get_shader_param = si_get_shader_param;
        sscreen->b.b.is_format_supported = si_is_format_supported;
        sscreen->b.b.resource_create = r600_resource_create_common;

        if (!r600_common_screen_init(&sscreen->b, ws)) {
                FREE(sscreen);
                return NULL;
        }

        sscreen->b.has_cp_dma = true;
        sscreen->b.has_streamout = HAVE_LLVM >= 0x0304;

        if (debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE))
                sscreen->b.debug_flags |= DBG_FS | DBG_VS | DBG_GS | DBG_PS | DBG_CS;

        /* Create the auxiliary context. This must be done last. */
        sscreen->b.aux_context = sscreen->b.b.context_create(&sscreen->b.b, NULL);

        return &sscreen->b.b;
}