gallium: add support for LODQ opcodes.
[mesa.git] src/gallium/drivers/radeonsi/si_pipe.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_public.h"

#include "radeon/radeon_uvd.h"
#include "util/u_blitter.h"
#include "util/u_memory.h"
#include "util/u_simple_shaders.h"
#include "vl/vl_decoder.h"

/*
 * pipe_context
 */
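/* Flush the GFX ring. If the caller passed a fence pointer, a fence for the
 * command stream is created first. The current render condition is saved and
 * disabled around the flush so that the flush itself cannot be skipped, and
 * is restored afterwards. */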
void si_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
	      unsigned flags)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_query *render_cond = NULL;
	boolean render_cond_cond = FALSE;
	unsigned render_cond_mode = 0;

	if (fence) {
		*fence = sctx->b.ws->cs_create_fence(sctx->b.rings.gfx.cs);
	}

	/* Disable render condition. */
	if (sctx->b.current_render_cond) {
		render_cond = sctx->b.current_render_cond;
		render_cond_cond = sctx->b.current_render_cond_cond;
		render_cond_mode = sctx->b.current_render_cond_mode;
		ctx->render_condition(ctx, NULL, FALSE, 0);
	}

	si_context_flush(sctx, flags);

	/* Re-enable render condition. */
	if (render_cond) {
		ctx->render_condition(ctx, render_cond, render_cond_cond, render_cond_mode);
	}
}

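/* pipe_context::flush entry point used by the state tracker. Flushes the DMA
 * ring first (if present) and then the GFX ring, translating
 * PIPE_FLUSH_END_OF_FRAME into the corresponding winsys flag. */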
static void si_flush_from_st(struct pipe_context *ctx,
			     struct pipe_fence_handle **fence,
			     unsigned flags)
{
	struct si_context *sctx = (struct si_context *)ctx;

	if (sctx->b.rings.dma.cs) {
		sctx->b.rings.dma.flush(sctx,
				flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0);
	}

	si_flush(ctx, fence,
		 flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0);
}

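/* Flush callback registered with the winsys; invoked when the command stream
 * has to be submitted (e.g. when it runs out of space). */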
static void si_flush_from_winsys(void *ctx, unsigned flags)
{
	si_flush((struct pipe_context*)ctx, NULL, flags);
}

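/* Free everything owned by the context: shader descriptors, the dummy
 * constant buffer and border color table, internal PM4 states, the custom
 * blend/DSA states used for blits, and the blitter itself. */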
static void si_destroy_context(struct pipe_context *context)
{
	struct si_context *sctx = (struct si_context *)context;

	si_release_all_descriptors(sctx);

	pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
	r600_resource_reference(&sctx->border_color_table, NULL);

	si_pm4_delete_state(sctx, gs_rings, sctx->gs_rings);
	si_pm4_delete_state(sctx, gs_onoff, sctx->gs_on);
	si_pm4_delete_state(sctx, gs_onoff, sctx->gs_off);

	if (sctx->dummy_pixel_shader) {
		sctx->b.b.delete_fs_state(&sctx->b.b, sctx->dummy_pixel_shader);
	}
	for (int i = 0; i < 8; i++) {
		sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_depth_stencil[i]);
		sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_depth[i]);
		sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_stencil[i]);
	}
	sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush_inplace);
	sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_resolve);
	sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_decompress);
	sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_fastclear);
	util_unreference_framebuffer_state(&sctx->framebuffer.state);

	util_blitter_destroy(sctx->blitter);

	si_pm4_cleanup(sctx);

	r600_common_context_cleanup(&sctx->b);
	FREE(sctx);
}

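/* Create a rendering context: initialize the common context, blit and
 * compute functions, video decoding (UVD if available), the GFX command
 * stream, shader descriptors and per-chip-class state, then bind a dummy
 * pixel shader and start the first command stream. */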
static struct pipe_context *si_create_context(struct pipe_screen *screen, void *priv)
{
	struct si_context *sctx = CALLOC_STRUCT(si_context);
	struct si_screen* sscreen = (struct si_screen *)screen;
	int shader, i;

	if (sctx == NULL)
		return NULL;

	sctx->b.b.screen = screen; /* this must be set first */
	sctx->b.b.priv = priv;
	sctx->b.b.destroy = si_destroy_context;
	sctx->b.b.flush = si_flush_from_st;
	sctx->screen = sscreen; /* Easy access to the screen/winsys. */

	if (!r600_common_context_init(&sctx->b, &sscreen->b))
		goto fail;

	si_init_blit_functions(sctx);
	si_init_compute_functions(sctx);

	if (sscreen->b.info.has_uvd) {
		sctx->b.b.create_video_codec = si_uvd_create_decoder;
		sctx->b.b.create_video_buffer = si_video_buffer_create;
	} else {
		sctx->b.b.create_video_codec = vl_create_decoder;
		sctx->b.b.create_video_buffer = vl_video_buffer_create;
	}

	sctx->b.rings.gfx.cs = sctx->b.ws->cs_create(sctx->b.ws, RING_GFX, NULL);
	sctx->b.rings.gfx.flush = si_flush_from_winsys;

	si_init_all_descriptors(sctx);

	/* Initialize cache_flush. */
	sctx->cache_flush = si_atom_cache_flush;
	sctx->atoms.cache_flush = &sctx->cache_flush;

	sctx->atoms.streamout_begin = &sctx->b.streamout.begin_atom;
	sctx->atoms.streamout_enable = &sctx->b.streamout.enable_atom;

	switch (sctx->b.chip_class) {
	case SI:
	case CIK:
		si_init_state_functions(sctx);
		si_init_config(sctx);
		break;
	default:
		R600_ERR("Unsupported chip class %d.\n", sctx->b.chip_class);
		goto fail;
	}

	sctx->b.ws->cs_set_flush_callback(sctx->b.rings.gfx.cs, si_flush_from_winsys, sctx);

	sctx->blitter = util_blitter_create(&sctx->b.b);
	if (sctx->blitter == NULL)
		goto fail;

	sctx->dummy_pixel_shader =
		util_make_fragment_cloneinput_shader(&sctx->b.b, 0,
						     TGSI_SEMANTIC_GENERIC,
						     TGSI_INTERPOLATE_CONSTANT);
	sctx->b.b.bind_fs_state(&sctx->b.b, sctx->dummy_pixel_shader);

	/* these must be last */
	si_begin_new_cs(sctx);
	r600_query_init_backend_mask(&sctx->b); /* this emits commands and must be last */

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
	 * with a NULL buffer). We need to use a dummy buffer instead. */
	if (sctx->b.chip_class == CIK) {
		sctx->null_const_buf.buffer = pipe_buffer_create(screen, PIPE_BIND_CONSTANT_BUFFER,
								 PIPE_USAGE_DEFAULT, 16);
		sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

		for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
			for (i = 0; i < NUM_CONST_BUFFERS; i++) {
				sctx->b.b.set_constant_buffer(&sctx->b.b, shader, i,
							      &sctx->null_const_buf);
			}
		}

		/* Clear the NULL constant buffer, because loads should return zeros. */
		sctx->b.clear_buffer(&sctx->b.b, sctx->null_const_buf.buffer, 0,
				     sctx->null_const_buf.buffer->width0, 0);
	}

	return &sctx->b.b;
fail:
	si_destroy_context(&sctx->b.b);
	return NULL;
}

/*
 * pipe_screen
 */

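/* Report screen-wide capabilities (PIPE_CAP_*) to the state tracker. */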
static int si_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
{
	struct si_screen *sscreen = (struct si_screen *)pscreen;

	switch (param) {
	/* Supported features (boolean caps). */
	case PIPE_CAP_TWO_SIDED_STENCIL:
	case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
	case PIPE_CAP_ANISOTROPIC_FILTER:
	case PIPE_CAP_POINT_SPRITE:
	case PIPE_CAP_OCCLUSION_QUERY:
	case PIPE_CAP_TEXTURE_SHADOW_MAP:
	case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
	case PIPE_CAP_BLEND_EQUATION_SEPARATE:
	case PIPE_CAP_TEXTURE_SWIZZLE:
	case PIPE_CAP_DEPTH_CLIP_DISABLE:
	case PIPE_CAP_SHADER_STENCIL_EXPORT:
	case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
	case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
	case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
	case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
	case PIPE_CAP_SM3:
	case PIPE_CAP_SEAMLESS_CUBE_MAP:
	case PIPE_CAP_PRIMITIVE_RESTART:
	case PIPE_CAP_CONDITIONAL_RENDER:
	case PIPE_CAP_TEXTURE_BARRIER:
	case PIPE_CAP_INDEP_BLEND_ENABLE:
	case PIPE_CAP_INDEP_BLEND_FUNC:
	case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
	case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
	case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_USER_INDEX_BUFFERS:
	case PIPE_CAP_USER_CONSTANT_BUFFERS:
	case PIPE_CAP_START_INSTANCE:
	case PIPE_CAP_NPOT_TEXTURES:
	case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
	case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
	case PIPE_CAP_TGSI_INSTANCEID:
	case PIPE_CAP_COMPUTE:
	case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
	case PIPE_CAP_TGSI_VS_LAYER:
	case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
	case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
		return 1;

	case PIPE_CAP_TEXTURE_MULTISAMPLE:
		/* 2D tiling on CIK is supported since DRM 2.35.0 */
		return HAVE_LLVM >= 0x0304 && (sscreen->b.chip_class < CIK ||
					       sscreen->b.info.drm_minor >= 35);

	case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
		return R600_MAP_BUFFER_ALIGNMENT;

	case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
	case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
		return 4;

	case PIPE_CAP_GLSL_FEATURE_LEVEL:
		return HAVE_LLVM >= 0x0305 ? 330 : 140;

	case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
		return MIN2(sscreen->b.info.vram_size, 0xFFFFFFFF);

	/* Unsupported features. */
	case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
	case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
	case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
	case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
	case PIPE_CAP_VERTEX_COLOR_CLAMPED:
	case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
	case PIPE_CAP_USER_VERTEX_BUFFERS:
	case PIPE_CAP_CUBE_MAP_ARRAY:
	case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
	case PIPE_CAP_TEXTURE_GATHER_SM5:
	case PIPE_CAP_TGSI_TEXCOORD:
	case PIPE_CAP_FAKE_SW_MSAA:
	case PIPE_CAP_TEXTURE_QUERY_LOD:
		return 0;

	case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
		return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600;

	/* Stream output. */
	case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
		return sscreen->b.has_streamout ? 4 : 0;
	case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
		return sscreen->b.has_streamout ? 1 : 0;
	case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
	case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
		return sscreen->b.has_streamout ? 32*4 : 0;

	/* Geometry shader output. */
	case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
		return 1024;
	case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
		return 4095;

	/* Texturing. */
	case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
	case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
		return 15; /* 16384 */
	case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
		/* textures support 8192, but layered rendering supports 2048 */
		return 12;
	case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
		/* textures support 8192, but layered rendering supports 2048 */
		return 2048;

	/* Render targets. */
	case PIPE_CAP_MAX_RENDER_TARGETS:
		return 8;

	case PIPE_CAP_MAX_VIEWPORTS:
		return 1;

	/* Timer queries, present when the clock frequency is non-zero. */
	case PIPE_CAP_QUERY_TIMESTAMP:
	case PIPE_CAP_QUERY_TIME_ELAPSED:
		return sscreen->b.info.r600_clock_crystal_freq != 0;

	case PIPE_CAP_MIN_TEXEL_OFFSET:
		return -8;

	case PIPE_CAP_MAX_TEXEL_OFFSET:
		return 7;
	case PIPE_CAP_ENDIANNESS:
		return PIPE_ENDIAN_LITTLE;
	}
	return 0;
}

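/* Report per-shader-stage limits. Geometry shaders require LLVM 3.5,
 * compute shaders only report a preferred IR (LLVM), and tessellation is
 * not supported yet. */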
static int si_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
{
	switch(shader)
	{
	case PIPE_SHADER_FRAGMENT:
	case PIPE_SHADER_VERTEX:
		break;
	case PIPE_SHADER_GEOMETRY:
#if HAVE_LLVM < 0x0305
		return 0;
#endif
		break;
	case PIPE_SHADER_COMPUTE:
		switch (param) {
		case PIPE_SHADER_CAP_PREFERRED_IR:
			return PIPE_SHADER_IR_LLVM;
		default:
			return 0;
		}
	default:
		/* TODO: support tessellation */
		return 0;
	}

	switch (param) {
	case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
		return 16384;
	case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
		return 32;
	case PIPE_SHADER_CAP_MAX_INPUTS:
		return 32;
	case PIPE_SHADER_CAP_MAX_TEMPS:
		return 256; /* Max native temporaries. */
	case PIPE_SHADER_CAP_MAX_ADDRS:
		/* FIXME Isn't this equal to TEMPS? */
		return 1; /* Max native address registers */
	case PIPE_SHADER_CAP_MAX_CONSTS:
		return 4096; /* actually only memory limits this */
	case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
		return NUM_PIPE_CONST_BUFFERS;
	case PIPE_SHADER_CAP_MAX_PREDS:
		return 0; /* FIXME */
	case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
		return 1;
	case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
		return 0;
	case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
		/* Indirection of geometry shader input dimension is not
		 * handled yet
		 */
		return shader < PIPE_SHADER_GEOMETRY;
	case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
		return 1;
	case PIPE_SHADER_CAP_INTEGERS:
		return 1;
	case PIPE_SHADER_CAP_SUBROUTINES:
		return 0;
	case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
	case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
		return 16;
	case PIPE_SHADER_CAP_PREFERRED_IR:
		return PIPE_SHADER_IR_TGSI;
	}
	return 0;
}

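/* The common screen is only destroyed once the last reference to the winsys
 * is dropped; other screens may still be sharing it. */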
static void si_destroy_screen(struct pipe_screen* pscreen)
{
	struct si_screen *sscreen = (struct si_screen *)pscreen;

	if (sscreen == NULL)
		return;

	if (!radeon_winsys_unref(sscreen->b.ws))
		return;

	r600_destroy_common_screen(&sscreen->b);
}

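/* Screen entry point: the function pointers are set first, the common
 * screen is initialized, and the auxiliary context is created last. */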
struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
{
	struct si_screen *sscreen = CALLOC_STRUCT(si_screen);
	if (sscreen == NULL) {
		return NULL;
	}

	/* Set functions first. */
	sscreen->b.b.context_create = si_create_context;
	sscreen->b.b.destroy = si_destroy_screen;
	sscreen->b.b.get_param = si_get_param;
	sscreen->b.b.get_shader_param = si_get_shader_param;
	sscreen->b.b.is_format_supported = si_is_format_supported;
	sscreen->b.b.resource_create = r600_resource_create_common;

	if (!r600_common_screen_init(&sscreen->b, ws)) {
		FREE(sscreen);
		return NULL;
	}

	sscreen->b.has_cp_dma = true;
	sscreen->b.has_streamout = HAVE_LLVM >= 0x0304;

	if (debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE))
		sscreen->b.debug_flags |= DBG_FS | DBG_VS | DBG_GS | DBG_PS | DBG_CS;

	/* Create the auxiliary context. This must be done last. */
	sscreen->b.aux_context = sscreen->b.b.context_create(&sscreen->b.b, NULL);

	return &sscreen->b.b;
}