/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <xf86drm.h>
#include <nouveau_drm.h>
#include "util/u_format.h"
#include "util/u_format_s3tc.h"
#include "util/u_screen.h"
#include "pipe/p_screen.h"
#include "compiler/nir/nir.h"

#include "nv50/nv50_context.h"
#include "nv50/nv50_screen.h"

#include "nouveau_vp3_video.h"

#include "nv_object.xml.h"

/* affected by LOCAL_WARPS_LOG_ALLOC / LOCAL_WARPS_NO_CLAMP */
#define LOCAL_WARPS_ALLOC 32
/* affected by STACK_WARPS_LOG_ALLOC / STACK_WARPS_NO_CLAMP */
#define STACK_WARPS_ALLOC 32

#define THREADS_IN_WARP 32

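/* Report which format / target / sample-count / binding combinations the
 * hardware supports. Only 0/1/2/4/8 samples are accepted, Z16 needs the
 * nva0+ 3D class, and depth/stencil, non-1D/2D/RECT or multisampled
 * resources cannot be bound linear.
 */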
static boolean
nv50_screen_is_format_supported(struct pipe_screen *pscreen,
                                enum pipe_format format,
                                enum pipe_texture_target target,
                                unsigned sample_count,
                                unsigned storage_sample_count,
                                unsigned bindings)
{
   if (sample_count > 8)
      return false;
   if (!(0x117 & (1 << sample_count))) /* 0, 1, 2, 4 or 8 */
      return false;
   if (sample_count == 8 && util_format_get_blocksizebits(format) >= 128)
      return false;

   if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
      return false;

   switch (format) {
   case PIPE_FORMAT_Z16_UNORM:
      if (nv50_screen(pscreen)->tesla->oclass < NVA0_3D_CLASS)
         return false;
      break;
   default:
      break;
   }

   if (bindings & PIPE_BIND_LINEAR)
      if (util_format_is_depth_or_stencil(format) ||
          (target != PIPE_TEXTURE_1D &&
           target != PIPE_TEXTURE_2D &&
           target != PIPE_TEXTURE_RECT) ||
          sample_count > 1)
         return false;

   /* shared is always supported */
   bindings &= ~(PIPE_BIND_LINEAR |
                 PIPE_BIND_SHARED);

   return ((nv50_format_table[format].usage |
            nv50_vertex_format[format].usage) & bindings) == bindings;
}

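/* Screen-level capability queries. Anything not handled explicitly falls
 * through to u_pipe_screen_get_param_defaults() after a debug message.
 */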
static int
nv50_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
{
   const uint16_t class_3d = nouveau_screen(pscreen)->class_3d;
   struct nouveau_device *dev = nouveau_screen(pscreen)->device;

   switch (param) {
   /* non-boolean caps */
   case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
      return 14;
   case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
      return 12;
   case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
      return 14;
   case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
      return 512;
   case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
   case PIPE_CAP_MIN_TEXEL_OFFSET:
      return -8;
   case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
   case PIPE_CAP_MAX_TEXEL_OFFSET:
      return 7;
   case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
      return 128 * 1024 * 1024;
   case PIPE_CAP_GLSL_FEATURE_LEVEL:
      return 330;
   case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
      return 330;
   case PIPE_CAP_MAX_RENDER_TARGETS:
      return 8;
   case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
      return 1;
   case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
   case PIPE_CAP_RASTERIZER_SUBPIXEL_BITS:
      return 8;
   case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
      return 4;
   case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
   case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
      return 64;
   case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
   case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
      return 1024;
   case PIPE_CAP_MAX_VERTEX_STREAMS:
      return 1;
   case PIPE_CAP_MAX_GS_INVOCATIONS:
      return 0;
   case PIPE_CAP_MAX_SHADER_BUFFER_SIZE:
      return 0;
   case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
      return 2048;
   case PIPE_CAP_MAX_VERTEX_ELEMENT_SRC_OFFSET:
      return 2047;
   case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
      return 256;
   case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
      return 16; /* 256 for binding as RT, but that's not possible in GL */
   case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
      return NOUVEAU_MIN_BUFFER_MAP_ALIGN;
   case PIPE_CAP_MAX_VIEWPORTS:
      return NV50_MAX_VIEWPORTS;
   case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
      return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_NV50;
   case PIPE_CAP_ENDIANNESS:
      return PIPE_ENDIAN_LITTLE;
   case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
      return (class_3d >= NVA3_3D_CLASS) ? 4 : 0;
   case PIPE_CAP_MAX_WINDOW_RECTANGLES:
      return NV50_MAX_WINDOW_RECTANGLES;
   case PIPE_CAP_MAX_TEXTURE_UPLOAD_MEMORY_BUDGET:
      return 16 * 1024 * 1024;
   case PIPE_CAP_MAX_VARYINGS:
      return 15;

   /* supported caps */
   case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
   case PIPE_CAP_TEXTURE_MIRROR_CLAMP_TO_EDGE:
   case PIPE_CAP_TEXTURE_SWIZZLE:
   case PIPE_CAP_NPOT_TEXTURES:
   case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
   case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
   case PIPE_CAP_ANISOTROPIC_FILTER:
   case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
   case PIPE_CAP_DEPTH_CLIP_DISABLE:
   case PIPE_CAP_POINT_SPRITE:
   case PIPE_CAP_SM3:
   case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
   case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
   case PIPE_CAP_VERTEX_COLOR_CLAMPED:
   case PIPE_CAP_QUERY_TIMESTAMP:
   case PIPE_CAP_QUERY_TIME_ELAPSED:
   case PIPE_CAP_OCCLUSION_QUERY:
   case PIPE_CAP_BLEND_EQUATION_SEPARATE:
   case PIPE_CAP_INDEP_BLEND_ENABLE:
   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
   case PIPE_CAP_PRIMITIVE_RESTART:
   case PIPE_CAP_TGSI_INSTANCEID:
   case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
   case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
   case PIPE_CAP_CONDITIONAL_RENDER:
   case PIPE_CAP_TEXTURE_BARRIER:
   case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
   case PIPE_CAP_START_INSTANCE:
   case PIPE_CAP_USER_VERTEX_BUFFERS:
   case PIPE_CAP_TEXTURE_MULTISAMPLE:
   case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
   case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE:
   case PIPE_CAP_SAMPLER_VIEW_TARGET:
   case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
   case PIPE_CAP_CLIP_HALFZ:
   case PIPE_CAP_POLYGON_OFFSET_CLAMP:
   case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
   case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
   case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
   case PIPE_CAP_DEPTH_BOUNDS_TEST:
   case PIPE_CAP_TGSI_TXQS:
   case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
   case PIPE_CAP_SHAREABLE_SHADERS:
   case PIPE_CAP_CLEAR_TEXTURE:
   case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL:
   case PIPE_CAP_INVALIDATE_BUFFER:
   case PIPE_CAP_STRING_MARKER:
   case PIPE_CAP_CULL_DISTANCE:
   case PIPE_CAP_TGSI_ARRAY_COMPONENTS:
   case PIPE_CAP_TGSI_MUL_ZERO_WINS:
   case PIPE_CAP_TGSI_TEX_TXF_LZ:
   case PIPE_CAP_TGSI_CLOCK:
   case PIPE_CAP_CAN_BIND_CONST_BUFFER_AS_VERTEX:
   case PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION:
   case PIPE_CAP_DEST_SURFACE_SRGB_CONTROL:
      return 1;
   case PIPE_CAP_SEAMLESS_CUBE_MAP:
      return 1; /* class_3d >= NVA0_3D_CLASS; */
   /* supported on nva0+ */
   case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
      return class_3d >= NVA0_3D_CLASS;
   /* supported on nva3+ */
   case PIPE_CAP_CUBE_MAP_ARRAY:
   case PIPE_CAP_INDEP_BLEND_FUNC:
   case PIPE_CAP_TEXTURE_QUERY_LOD:
   case PIPE_CAP_SAMPLE_SHADING:
   case PIPE_CAP_FORCE_PERSAMPLE_INTERP:
      return class_3d >= NVA3_3D_CLASS;

   /* unsupported caps */
   case PIPE_CAP_DEPTH_CLIP_DISABLE_SEPARATE:
   case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
   case PIPE_CAP_SHADER_STENCIL_EXPORT:
   case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
   case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_TGSI_TEXCOORD:
   case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT:
   case PIPE_CAP_TEXTURE_GATHER_SM5:
   case PIPE_CAP_FAKE_SW_MSAA:
   case PIPE_CAP_TEXTURE_GATHER_OFFSETS:
   case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
   case PIPE_CAP_DRAW_INDIRECT:
   case PIPE_CAP_MULTI_DRAW_INDIRECT:
   case PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS:
   case PIPE_CAP_VERTEXID_NOBASE:
   case PIPE_CAP_MULTISAMPLE_Z_RESOLVE: /* potentially supported on some hw */
   case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
   case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
   case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
   case PIPE_CAP_DRAW_PARAMETERS:
   case PIPE_CAP_TGSI_PACK_HALF_FLOAT:
   case PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL:
   case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
   case PIPE_CAP_GENERATE_MIPMAP:
   case PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY:
   case PIPE_CAP_SURFACE_REINTERPRET_BLOCKS:
   case PIPE_CAP_QUERY_BUFFER_OBJECT:
   case PIPE_CAP_QUERY_MEMORY_INFO:
   case PIPE_CAP_PCI_GROUP:
   case PIPE_CAP_PCI_BUS:
   case PIPE_CAP_PCI_DEVICE:
   case PIPE_CAP_PCI_FUNCTION:
   case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
   case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
   case PIPE_CAP_PRIMITIVE_RESTART_FOR_PATCHES:
   case PIPE_CAP_TGSI_VOTE:
   case PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED:
   case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
   case PIPE_CAP_TGSI_CAN_READ_OUTPUTS:
   case PIPE_CAP_NATIVE_FENCE_FD:
   case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
   case PIPE_CAP_TGSI_FS_FBFETCH:
   case PIPE_CAP_DOUBLES:
   case PIPE_CAP_INT64:
   case PIPE_CAP_INT64_DIVMOD:
   case PIPE_CAP_POLYGON_MODE_FILL_RECTANGLE:
   case PIPE_CAP_SPARSE_BUFFER_PAGE_SIZE:
   case PIPE_CAP_TGSI_BALLOT:
   case PIPE_CAP_TGSI_TES_LAYER_VIEWPORT:
   case PIPE_CAP_POST_DEPTH_COVERAGE:
   case PIPE_CAP_BINDLESS_TEXTURE:
   case PIPE_CAP_NIR_SAMPLERS_AS_DEREF:
   case PIPE_CAP_QUERY_SO_OVERFLOW:
   case PIPE_CAP_MEMOBJ:
   case PIPE_CAP_LOAD_CONSTBUF:
   case PIPE_CAP_TGSI_ANY_REG_AS_ADDRESS:
   case PIPE_CAP_TILE_RASTER_ORDER:
   case PIPE_CAP_MAX_COMBINED_SHADER_OUTPUT_RESOURCES:
   case PIPE_CAP_FRAMEBUFFER_MSAA_CONSTRAINTS:
   case PIPE_CAP_SIGNED_VERTEX_BUFFER_OFFSET:
   case PIPE_CAP_CONTEXT_PRIORITY_MASK:
   case PIPE_CAP_FENCE_SIGNAL:
   case PIPE_CAP_CONSTBUF0_FLAGS:
   case PIPE_CAP_PACKED_UNIFORMS:
   case PIPE_CAP_CONSERVATIVE_RASTER_POST_SNAP_TRIANGLES:
   case PIPE_CAP_CONSERVATIVE_RASTER_POST_SNAP_POINTS_LINES:
   case PIPE_CAP_CONSERVATIVE_RASTER_PRE_SNAP_TRIANGLES:
   case PIPE_CAP_CONSERVATIVE_RASTER_PRE_SNAP_POINTS_LINES:
   case PIPE_CAP_CONSERVATIVE_RASTER_POST_DEPTH_COVERAGE:
   case PIPE_CAP_MAX_CONSERVATIVE_RASTER_SUBPIXEL_PRECISION_BIAS:
   case PIPE_CAP_PROGRAMMABLE_SAMPLE_LOCATIONS:
   case PIPE_CAP_MAX_COMBINED_SHADER_BUFFERS:
   case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTERS:
   case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTER_BUFFERS:
   case PIPE_CAP_SURFACE_SAMPLE_COUNT:
   case PIPE_CAP_TGSI_ATOMFADD:
   case PIPE_CAP_QUERY_PIPELINE_STATISTICS_SINGLE:
   case PIPE_CAP_RGB_OVERRIDE_DST_ALPHA_BLEND:
   case PIPE_CAP_GLSL_TESS_LEVELS_AS_INPUTS:
   case PIPE_CAP_NIR_COMPACT_ARRAYS:
   case PIPE_CAP_COMPUTE:
      return 0;

   case PIPE_CAP_VENDOR_ID:
      return 0x10de;
   case PIPE_CAP_DEVICE_ID: {
      uint64_t device_id;
      if (nouveau_getparam(dev, NOUVEAU_GETPARAM_PCI_DEVICE, &device_id)) {
         NOUVEAU_ERR("NOUVEAU_GETPARAM_PCI_DEVICE failed.\n");
         return -1;
      }
      return device_id;
   }
   case PIPE_CAP_ACCELERATED:
      return 1;
   case PIPE_CAP_VIDEO_MEMORY:
      return dev->vram_size >> 20;
   case PIPE_CAP_UMA:
      return 0;
   default:
      debug_printf("%s: unhandled cap %d\n", __func__, param);
      return u_pipe_screen_get_param_defaults(pscreen, param);
   }
}

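/* Per-stage shader limits. Only the vertex, geometry and fragment stages are
 * exposed here; anything else (including compute) reports 0.
 */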
static int
nv50_screen_get_shader_param(struct pipe_screen *pscreen,
                             enum pipe_shader_type shader,
                             enum pipe_shader_cap param)
{
   const struct nouveau_screen *screen = nouveau_screen(pscreen);

   switch (shader) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_GEOMETRY:
   case PIPE_SHADER_FRAGMENT:
      break;
   case PIPE_SHADER_COMPUTE:
   default:
      return 0;
   }

   switch (param) {
   case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
      return 16384;
   case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
      return 4;
   case PIPE_SHADER_CAP_MAX_INPUTS:
      if (shader == PIPE_SHADER_VERTEX)
         return 32;
      return 15;
   case PIPE_SHADER_CAP_MAX_OUTPUTS:
      return 16;
   case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
      return 65536;
   case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
      return NV50_MAX_PIPE_CONSTBUFS;
   case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
      return shader != PIPE_SHADER_FRAGMENT;
   case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
   case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
   case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
      return 1;
   case PIPE_SHADER_CAP_MAX_TEMPS:
      return nv50_screen(pscreen)->max_tls_space / ONE_TEMP_SIZE;
   case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
      return 1;
   case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
      return 1;
   case PIPE_SHADER_CAP_INT64_ATOMICS:
   case PIPE_SHADER_CAP_FP16:
   case PIPE_SHADER_CAP_SUBROUTINES:
      return 0; /* please inline, or provide function declarations */
   case PIPE_SHADER_CAP_INTEGERS:
      return 1;
   case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
      return 1;
   case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
      /* The chip could handle more sampler views than samplers */
   case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
      return MIN2(16, PIPE_MAX_SAMPLERS);
   case PIPE_SHADER_CAP_PREFERRED_IR:
      return screen->prefer_nir ? PIPE_SHADER_IR_NIR : PIPE_SHADER_IR_TGSI;
   case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
      return 32;
   case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED:
   case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED:
   case PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED:
   case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED:
   case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
   case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
   case PIPE_SHADER_CAP_SUPPORTED_IRS:
   case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
   case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
   case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
   case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
      return 0;
   case PIPE_SHADER_CAP_SCALAR_ISA:
      return 1;
   default:
      NOUVEAU_ERR("unknown PIPE_SHADER_CAP %d\n", param);
      return 0;
   }
}

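/* Float-valued limits: line/point width ranges, max anisotropy and LOD bias. */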
static float
nv50_screen_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
{
   switch (param) {
   case PIPE_CAPF_MAX_LINE_WIDTH:
   case PIPE_CAPF_MAX_LINE_WIDTH_AA:
      return 10.0f;
   case PIPE_CAPF_MAX_POINT_WIDTH:
   case PIPE_CAPF_MAX_POINT_WIDTH_AA:
      return 64.0f;
   case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
      return 16.0f;
   case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
      return 4.0f;
   case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
   case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
   case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
      return 0.0f;
   }

   NOUVEAU_ERR("unknown PIPE_CAPF %d\n", param);
   return 0.0f;
}

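/* Compute capabilities. The RET() macro copies the answer into the caller's
 * buffer (if one was provided) and returns its size in bytes.
 */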
static int
nv50_screen_get_compute_param(struct pipe_screen *pscreen,
                              enum pipe_shader_ir ir_type,
                              enum pipe_compute_cap param, void *data)
{
   struct nv50_screen *screen = nv50_screen(pscreen);

#define RET(x) do {                  \
   if (data)                         \
      memcpy(data, x, sizeof(x));    \
   return sizeof(x);                 \
} while (0)

   switch (param) {
   case PIPE_COMPUTE_CAP_GRID_DIMENSION:
      RET((uint64_t []) { 2 });
   case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
      RET(((uint64_t []) { 65535, 65535 }));
   case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
      RET(((uint64_t []) { 512, 512, 64 }));
   case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
      RET((uint64_t []) { 512 });
   case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE: /* g0-15[] */
      RET((uint64_t []) { 1ULL << 32 });
   case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE: /* s[] */
      RET((uint64_t []) { 16 << 10 });
   case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE: /* l[] */
      RET((uint64_t []) { 16 << 10 });
   case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE: /* c[], arbitrary limit */
      RET((uint64_t []) { 4096 });
   case PIPE_COMPUTE_CAP_SUBGROUP_SIZE:
      RET((uint32_t []) { 32 });
   case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
      RET((uint64_t []) { 1ULL << 40 });
   case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
      RET((uint32_t []) { 0 });
   case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
      RET((uint32_t []) { screen->mp_count });
   case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
      RET((uint32_t []) { 512 }); /* FIXME: arbitrary limit */
   case PIPE_COMPUTE_CAP_ADDRESS_BITS:
      RET((uint32_t []) { 32 });
   case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
      RET((uint64_t []) { 0 });
   default:
      return 0;
   }

#undef RET
}

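/* Tear down the screen: wait for the last fence, then release the blitter,
 * performance-monitor program, buffer objects, code heaps and engine objects.
 */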
static void
nv50_screen_destroy(struct pipe_screen *pscreen)
{
   struct nv50_screen *screen = nv50_screen(pscreen);

   if (!nouveau_drm_screen_unref(&screen->base))
      return;

   if (screen->base.fence.current) {
      struct nouveau_fence *current = NULL;

      /* nouveau_fence_wait will create a new current fence, so wait on the
       * _current_ one, and remove both.
       */
      nouveau_fence_ref(screen->base.fence.current, &current);
      nouveau_fence_wait(current, NULL);
      nouveau_fence_ref(NULL, &current);
      nouveau_fence_ref(NULL, &screen->base.fence.current);
   }
   if (screen->base.pushbuf)
      screen->base.pushbuf->user_priv = NULL;

   if (screen->blitter)
      nv50_blitter_destroy(screen);
   if (screen->pm.prog) {
      screen->pm.prog->code = NULL; /* hardcoded, don't FREE */
      nv50_program_destroy(NULL, screen->pm.prog);
      FREE(screen->pm.prog);
   }

   nouveau_bo_ref(NULL, &screen->code);
   nouveau_bo_ref(NULL, &screen->tls_bo);
   nouveau_bo_ref(NULL, &screen->stack_bo);
   nouveau_bo_ref(NULL, &screen->txc);
   nouveau_bo_ref(NULL, &screen->uniforms);
   nouveau_bo_ref(NULL, &screen->fence.bo);

   nouveau_heap_destroy(&screen->vp_code_heap);
   nouveau_heap_destroy(&screen->gp_code_heap);
   nouveau_heap_destroy(&screen->fp_code_heap);

   FREE(screen->tic.entries);

   nouveau_object_del(&screen->tesla);
   nouveau_object_del(&screen->eng2d);
   nouveau_object_del(&screen->m2mf);
   nouveau_object_del(&screen->compute);
   nouveau_object_del(&screen->sync);

   nouveau_screen_fini(&screen->base);

   FREE(screen);
}

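/* Emit a fence: bump the CPU-side sequence number and have the 3D engine
 * write it to the fence buffer via QUERY_GET once preceding work completes.
 * nv50_screen_fence_update() below reads back the last value written.
 */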
static void
nv50_screen_fence_emit(struct pipe_screen *pscreen, u32 *sequence)
{
   struct nv50_screen *screen = nv50_screen(pscreen);
   struct nouveau_pushbuf *push = screen->base.pushbuf;

   /* we need to do it after possible flush in MARK_RING */
   *sequence = ++screen->base.fence.sequence;

   assert(PUSH_AVAIL(push) + push->rsvd_kick >= 5);
   PUSH_DATA (push, NV50_FIFO_PKHDR(NV50_3D(QUERY_ADDRESS_HIGH), 4));
   PUSH_DATAh(push, screen->fence.bo->offset);
   PUSH_DATA (push, screen->fence.bo->offset);
   PUSH_DATA (push, *sequence);
   PUSH_DATA (push, NV50_3D_QUERY_GET_MODE_WRITE_UNK0 |
                    NV50_3D_QUERY_GET_UNK4 |
                    NV50_3D_QUERY_GET_UNIT_CROP |
                    NV50_3D_QUERY_GET_TYPE_QUERY |
                    NV50_3D_QUERY_GET_QUERY_SELECT_ZERO |
                    NV50_3D_QUERY_GET_SHORT);
}

static u32
nv50_screen_fence_update(struct pipe_screen *pscreen)
{
   return nv50_screen(pscreen)->fence.map[0];
}

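/* Upload the initial hardware context: bind the M2MF, 2D and 3D classes on
 * their subchannels and program the state the rest of the driver relies on
 * (DMA objects, shader code / TLS / stack / uniform / TIC / TSC base
 * addresses, default viewport, scissor and rasterizer state).
 */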
static void
nv50_screen_init_hwctx(struct nv50_screen *screen)
{
   struct nouveau_pushbuf *push = screen->base.pushbuf;
   struct nv04_fifo *fifo;
   unsigned i;

   fifo = (struct nv04_fifo *)screen->base.channel->data;

   BEGIN_NV04(push, SUBC_M2MF(NV01_SUBCHAN_OBJECT), 1);
   PUSH_DATA (push, screen->m2mf->handle);
   BEGIN_NV04(push, SUBC_M2MF(NV03_M2MF_DMA_NOTIFY), 3);
   PUSH_DATA (push, screen->sync->handle);
   PUSH_DATA (push, fifo->vram);
   PUSH_DATA (push, fifo->vram);

   BEGIN_NV04(push, SUBC_2D(NV01_SUBCHAN_OBJECT), 1);
   PUSH_DATA (push, screen->eng2d->handle);
   BEGIN_NV04(push, NV50_2D(DMA_NOTIFY), 4);
   PUSH_DATA (push, screen->sync->handle);
   PUSH_DATA (push, fifo->vram);
   PUSH_DATA (push, fifo->vram);
   PUSH_DATA (push, fifo->vram);
   BEGIN_NV04(push, NV50_2D(OPERATION), 1);
   PUSH_DATA (push, NV50_2D_OPERATION_SRCCOPY);
   BEGIN_NV04(push, NV50_2D(CLIP_ENABLE), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_2D(COLOR_KEY_ENABLE), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, SUBC_2D(0x0888), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_2D(COND_MODE), 1);
   PUSH_DATA (push, NV50_2D_COND_MODE_ALWAYS);

   BEGIN_NV04(push, SUBC_3D(NV01_SUBCHAN_OBJECT), 1);
   PUSH_DATA (push, screen->tesla->handle);

   BEGIN_NV04(push, NV50_3D(COND_MODE), 1);
   PUSH_DATA (push, NV50_3D_COND_MODE_ALWAYS);

   BEGIN_NV04(push, NV50_3D(DMA_NOTIFY), 1);
   PUSH_DATA (push, screen->sync->handle);
   BEGIN_NV04(push, NV50_3D(DMA_ZETA), 11);
   for (i = 0; i < 11; ++i)
      PUSH_DATA(push, fifo->vram);
   BEGIN_NV04(push, NV50_3D(DMA_COLOR(0)), NV50_3D_DMA_COLOR__LEN);
   for (i = 0; i < NV50_3D_DMA_COLOR__LEN; ++i)
      PUSH_DATA(push, fifo->vram);

   BEGIN_NV04(push, NV50_3D(REG_MODE), 1);
   PUSH_DATA (push, NV50_3D_REG_MODE_STRIPED);
   BEGIN_NV04(push, NV50_3D(UNK1400_LANES), 1);
   PUSH_DATA (push, 0xf);

   if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", true)) {
      BEGIN_NV04(push, NV50_3D(WATCHDOG_TIMER), 1);
      PUSH_DATA (push, 0x18);
   }

   BEGIN_NV04(push, NV50_3D(ZETA_COMP_ENABLE), 1);
   PUSH_DATA(push, screen->base.drm->version >= 0x01000101);

   BEGIN_NV04(push, NV50_3D(RT_COMP_ENABLE(0)), 8);
   for (i = 0; i < 8; ++i)
      PUSH_DATA(push, screen->base.drm->version >= 0x01000101);

   BEGIN_NV04(push, NV50_3D(RT_CONTROL), 1);
   PUSH_DATA (push, 1);

   BEGIN_NV04(push, NV50_3D(CSAA_ENABLE), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(MULTISAMPLE_ENABLE), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(MULTISAMPLE_MODE), 1);
   PUSH_DATA (push, NV50_3D_MULTISAMPLE_MODE_MS1);
   BEGIN_NV04(push, NV50_3D(MULTISAMPLE_CTRL), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(PRIM_RESTART_WITH_DRAW_ARRAYS), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(BLEND_SEPARATE_ALPHA), 1);
   PUSH_DATA (push, 1);

   if (screen->tesla->oclass >= NVA0_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NVA0_3D_TEX_MISC), 1);
      PUSH_DATA (push, 0);
   }

   BEGIN_NV04(push, NV50_3D(SCREEN_Y_CONTROL), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(WINDOW_OFFSET_X), 2);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(ZCULL_REGION), 1);
   PUSH_DATA (push, 0x3f);

   BEGIN_NV04(push, NV50_3D(VP_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->code->offset + (0 << NV50_CODE_BO_SIZE_LOG2));
   PUSH_DATA (push, screen->code->offset + (0 << NV50_CODE_BO_SIZE_LOG2));

   BEGIN_NV04(push, NV50_3D(FP_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->code->offset + (1 << NV50_CODE_BO_SIZE_LOG2));
   PUSH_DATA (push, screen->code->offset + (1 << NV50_CODE_BO_SIZE_LOG2));

   BEGIN_NV04(push, NV50_3D(GP_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->code->offset + (2 << NV50_CODE_BO_SIZE_LOG2));
   PUSH_DATA (push, screen->code->offset + (2 << NV50_CODE_BO_SIZE_LOG2));

   BEGIN_NV04(push, NV50_3D(LOCAL_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->tls_bo->offset);
   PUSH_DATA (push, screen->tls_bo->offset);
   PUSH_DATA (push, util_logbase2(screen->cur_tls_space / 8));

   BEGIN_NV04(push, NV50_3D(STACK_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->stack_bo->offset);
   PUSH_DATA (push, screen->stack_bo->offset);
   PUSH_DATA (push, 4);

   BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->uniforms->offset + (0 << 16));
   PUSH_DATA (push, screen->uniforms->offset + (0 << 16));
   PUSH_DATA (push, (NV50_CB_PVP << 16) | 0x0000);

   BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->uniforms->offset + (1 << 16));
   PUSH_DATA (push, screen->uniforms->offset + (1 << 16));
   PUSH_DATA (push, (NV50_CB_PGP << 16) | 0x0000);

   BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->uniforms->offset + (2 << 16));
   PUSH_DATA (push, screen->uniforms->offset + (2 << 16));
   PUSH_DATA (push, (NV50_CB_PFP << 16) | 0x0000);

   BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->uniforms->offset + (3 << 16));
   PUSH_DATA (push, screen->uniforms->offset + (3 << 16));
   PUSH_DATA (push, (NV50_CB_AUX << 16) | (NV50_CB_AUX_SIZE & 0xffff));

   BEGIN_NI04(push, NV50_3D(SET_PROGRAM_CB), 3);
   PUSH_DATA (push, (NV50_CB_AUX << 12) | 0xf01);
   PUSH_DATA (push, (NV50_CB_AUX << 12) | 0xf21);
   PUSH_DATA (push, (NV50_CB_AUX << 12) | 0xf31);

   /* return { 0.0, 0.0, 0.0, 0.0 } on out-of-bounds vtxbuf access */
   BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
   PUSH_DATA (push, (NV50_CB_AUX_RUNOUT_OFFSET << (8 - 2)) | NV50_CB_AUX);
   BEGIN_NI04(push, NV50_3D(CB_DATA(0)), 4);
   PUSH_DATAf(push, 0.0f);
   PUSH_DATAf(push, 0.0f);
   PUSH_DATAf(push, 0.0f);
   PUSH_DATAf(push, 0.0f);
   BEGIN_NV04(push, NV50_3D(VERTEX_RUNOUT_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->uniforms->offset + (3 << 16) + NV50_CB_AUX_RUNOUT_OFFSET);
   PUSH_DATA (push, screen->uniforms->offset + (3 << 16) + NV50_CB_AUX_RUNOUT_OFFSET);

   nv50_upload_ms_info(push);

   /* max TIC (bits 4:8) & TSC bindings, per program type */
   for (i = 0; i < 3; ++i) {
      BEGIN_NV04(push, NV50_3D(TEX_LIMITS(i)), 1);
      PUSH_DATA (push, 0x54);
   }

   BEGIN_NV04(push, NV50_3D(TIC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset);
   PUSH_DATA (push, screen->txc->offset);
   PUSH_DATA (push, NV50_TIC_MAX_ENTRIES - 1);

   BEGIN_NV04(push, NV50_3D(TSC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset + 65536);
   PUSH_DATA (push, screen->txc->offset + 65536);
   PUSH_DATA (push, NV50_TSC_MAX_ENTRIES - 1);

   BEGIN_NV04(push, NV50_3D(LINKED_TSC), 1);
   PUSH_DATA (push, 0);

   BEGIN_NV04(push, NV50_3D(CLIP_RECTS_EN), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(CLIP_RECTS_MODE), 1);
   PUSH_DATA (push, NV50_3D_CLIP_RECTS_MODE_INSIDE_ANY);
   BEGIN_NV04(push, NV50_3D(CLIP_RECT_HORIZ(0)), 8 * 2);
   for (i = 0; i < 8 * 2; ++i)
      PUSH_DATA(push, 0);
   BEGIN_NV04(push, NV50_3D(CLIPID_ENABLE), 1);
   PUSH_DATA (push, 0);

   BEGIN_NV04(push, NV50_3D(VIEWPORT_TRANSFORM_EN), 1);
   PUSH_DATA (push, 1);
   for (i = 0; i < NV50_MAX_VIEWPORTS; i++) {
      BEGIN_NV04(push, NV50_3D(DEPTH_RANGE_NEAR(i)), 2);
      PUSH_DATAf(push, 0.0f);
      PUSH_DATAf(push, 1.0f);
      BEGIN_NV04(push, NV50_3D(VIEWPORT_HORIZ(i)), 2);
      PUSH_DATA (push, 8192 << 16);
      PUSH_DATA (push, 8192 << 16);
   }

   BEGIN_NV04(push, NV50_3D(VIEW_VOLUME_CLIP_CTRL), 1);
#ifdef NV50_SCISSORS_CLIPPING
   PUSH_DATA (push, 0x0000);
#else
   PUSH_DATA (push, 0x1080);
#endif

   BEGIN_NV04(push, NV50_3D(CLEAR_FLAGS), 1);
   PUSH_DATA (push, NV50_3D_CLEAR_FLAGS_CLEAR_RECT_VIEWPORT);

   /* We use scissors instead of exact view volume clipping,
    * so they're always enabled.
    */
   for (i = 0; i < NV50_MAX_VIEWPORTS; i++) {
      BEGIN_NV04(push, NV50_3D(SCISSOR_ENABLE(i)), 3);
      PUSH_DATA (push, 1);
      PUSH_DATA (push, 8192 << 16);
      PUSH_DATA (push, 8192 << 16);
   }

   BEGIN_NV04(push, NV50_3D(RASTERIZE_ENABLE), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(POINT_RASTER_RULES), 1);
   PUSH_DATA (push, NV50_3D_POINT_RASTER_RULES_OGL);
   BEGIN_NV04(push, NV50_3D(FRAG_COLOR_CLAMP_EN), 1);
   PUSH_DATA (push, 0x11111111);
   BEGIN_NV04(push, NV50_3D(EDGEFLAG), 1);
   PUSH_DATA (push, 1);

   BEGIN_NV04(push, NV50_3D(VB_ELEMENT_BASE), 1);
   PUSH_DATA (push, 0);
   if (screen->base.class_3d >= NV84_3D_CLASS) {
      BEGIN_NV04(push, NV84_3D(VERTEX_ID_BASE), 1);
      PUSH_DATA (push, 0);
   }

   BEGIN_NV04(push, NV50_3D(UNK0FDC), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(UNK19C0), 1);
   PUSH_DATA (push, 1);

   PUSH_KICK (push);
}

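/* Allocate the thread-local storage buffer used for shader temporaries. The
 * requested per-thread space is rounded up to a power-of-two number of temps,
 * then scaled by TPs, MPs per TP, warps per MP and threads per warp.
 */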
static int nv50_tls_alloc(struct nv50_screen *screen, unsigned tls_space,
      uint64_t *tls_size)
{
   struct nouveau_device *dev = screen->base.device;
   int ret;

   screen->cur_tls_space = util_next_power_of_two(tls_space / ONE_TEMP_SIZE) *
         ONE_TEMP_SIZE;
   if (nouveau_mesa_debug)
      debug_printf("allocating space for %u temps\n",
            util_next_power_of_two(tls_space / ONE_TEMP_SIZE));
   *tls_size = screen->cur_tls_space * util_next_power_of_two(screen->TPs) *
         screen->MPsInTP * LOCAL_WARPS_ALLOC * THREADS_IN_WARP;

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 1 << 16,
                        *tls_size, NULL, &screen->tls_bo);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate local bo: %d\n", ret);
      return ret;
   }

   return 0;
}

int nv50_tls_realloc(struct nv50_screen *screen, unsigned tls_space)
{
   struct nouveau_pushbuf *push = screen->base.pushbuf;
   int ret;
   uint64_t tls_size;

   if (tls_space < screen->cur_tls_space)
      return 0;
   if (tls_space > screen->max_tls_space) {
      /* fixable by limiting number of warps (LOCAL_WARPS_LOG_ALLOC /
       * LOCAL_WARPS_NO_CLAMP) */
      NOUVEAU_ERR("Unsupported number of temporaries (%u > %u). Fixable if someone cares.\n",
            (unsigned)(tls_space / ONE_TEMP_SIZE),
            (unsigned)(screen->max_tls_space / ONE_TEMP_SIZE));
      return -ENOMEM;
   }

   nouveau_bo_ref(NULL, &screen->tls_bo);
   ret = nv50_tls_alloc(screen, tls_space, &tls_size);
   if (ret)
      return ret;

   BEGIN_NV04(push, NV50_3D(LOCAL_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->tls_bo->offset);
   PUSH_DATA (push, screen->tls_bo->offset);
   PUSH_DATA (push, util_logbase2(screen->cur_tls_space / 8));

   return 1;
}

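/* NIR compiler options returned by get_compiler_options() when the preferred
 * IR is NIR: operations the nv50 codegen path does not want to see natively
 * are marked for lowering by NIR.
 */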
static const nir_shader_compiler_options nir_options = {
   .fuse_ffma = false, /* nir doesn't track mad vs fma */
   .lower_flrp32 = true,
   .lower_flrp64 = true,
   .lower_fpow = false,
   .lower_fmod64 = true,
   .lower_uadd_carry = true,
   .lower_usub_borrow = true,
   .lower_ffract = true,
   .lower_pack_half_2x16 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_unpack_half_2x16 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_all_io_to_temps = false,
   .native_integers = true,
   .lower_cs_local_index_from_id = true,
   .use_interpolated_input_intrinsics = true,
   .max_unroll_iterations = 32,
};

static const void *
nv50_screen_get_compiler_options(struct pipe_screen *pscreen,
                                 enum pipe_shader_ir ir,
                                 enum pipe_shader_type shader)
{
   if (ir == PIPE_SHADER_IR_NIR)
      return &nir_options;
   return NULL;
}

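/* Create the nv50 screen: set up the nouveau winsys screen and channel,
 * allocate the fence, code, TLS, stack, uniform and TIC/TSC buffers, create
 * the engine objects and upload the initial hardware context.
 */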
struct nouveau_screen *
nv50_screen_create(struct nouveau_device *dev)
{
   struct nv50_screen *screen;
   struct pipe_screen *pscreen;
   struct nouveau_object *chan;
   uint64_t value;
   uint32_t tesla_class;
   unsigned stack_size;
   int ret;

   screen = CALLOC_STRUCT(nv50_screen);
   if (!screen)
      return NULL;
   pscreen = &screen->base.base;
   pscreen->destroy = nv50_screen_destroy;

   ret = nouveau_screen_init(&screen->base, dev);
   if (ret) {
      NOUVEAU_ERR("nouveau_screen_init failed: %d\n", ret);
      goto fail;
   }

   /* TODO: Prevent FIFO prefetch before transfer of index buffers and
    * admit them to VRAM.
    */
   screen->base.vidmem_bindings |= PIPE_BIND_CONSTANT_BUFFER |
      PIPE_BIND_VERTEX_BUFFER;
   screen->base.sysmem_bindings |=
      PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER;

   screen->base.pushbuf->user_priv = screen;
   screen->base.pushbuf->rsvd_kick = 5;

   chan = screen->base.channel;

   pscreen->context_create = nv50_create;
   pscreen->is_format_supported = nv50_screen_is_format_supported;
   pscreen->get_param = nv50_screen_get_param;
   pscreen->get_shader_param = nv50_screen_get_shader_param;
   pscreen->get_paramf = nv50_screen_get_paramf;
   pscreen->get_compute_param = nv50_screen_get_compute_param;
   pscreen->get_driver_query_info = nv50_screen_get_driver_query_info;
   pscreen->get_driver_query_group_info = nv50_screen_get_driver_query_group_info;

   /* nir stuff */
   pscreen->get_compiler_options = nv50_screen_get_compiler_options;

   nv50_screen_init_resource_functions(pscreen);

   if (screen->base.device->chipset < 0x84 ||
       debug_get_bool_option("NOUVEAU_PMPEG", false)) {
      /* PMPEG */
      nouveau_screen_init_vdec(&screen->base);
   } else if (screen->base.device->chipset < 0x98 ||
              screen->base.device->chipset == 0xa0) {
      /* VP2 */
      screen->base.base.get_video_param = nv84_screen_get_video_param;
      screen->base.base.is_video_format_supported = nv84_screen_video_supported;
   } else {
      /* VP3/4 */
      screen->base.base.get_video_param = nouveau_vp3_screen_get_video_param;
      screen->base.base.is_video_format_supported = nouveau_vp3_screen_video_supported;
   }

   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0, 4096,
                        NULL, &screen->fence.bo);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate fence bo: %d\n", ret);
      goto fail;
   }

   nouveau_bo_map(screen->fence.bo, 0, NULL);
   screen->fence.map = screen->fence.bo->map;
   screen->base.fence.emit = nv50_screen_fence_emit;
   screen->base.fence.update = nv50_screen_fence_update;

   ret = nouveau_object_new(chan, 0xbeef0301, NOUVEAU_NOTIFIER_CLASS,
                            &(struct nv04_notify){ .length = 32 },
                            sizeof(struct nv04_notify), &screen->sync);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate notifier: %d\n", ret);
      goto fail;
   }

   ret = nouveau_object_new(chan, 0xbeef5039, NV50_M2MF_CLASS,
                            NULL, 0, &screen->m2mf);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate PGRAPH context for M2MF: %d\n", ret);
      goto fail;
   }

   ret = nouveau_object_new(chan, 0xbeef502d, NV50_2D_CLASS,
                            NULL, 0, &screen->eng2d);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate PGRAPH context for 2D: %d\n", ret);
      goto fail;
   }

   switch (dev->chipset & 0xf0) {
   case 0x50:
      tesla_class = NV50_3D_CLASS;
      break;
   case 0x80:
   case 0x90:
      tesla_class = NV84_3D_CLASS;
      break;
   case 0xa0:
      switch (dev->chipset) {
      case 0xa0:
      case 0xaa:
      case 0xac:
         tesla_class = NVA0_3D_CLASS;
         break;
      case 0xaf:
         tesla_class = NVAF_3D_CLASS;
         break;
      default:
         tesla_class = NVA3_3D_CLASS;
         break;
      }
      break;
   default:
      NOUVEAU_ERR("Not a known NV50 chipset: NV%02x\n", dev->chipset);
      goto fail;
   }
   screen->base.class_3d = tesla_class;

   ret = nouveau_object_new(chan, 0xbeef5097, tesla_class,
                            NULL, 0, &screen->tesla);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate PGRAPH context for 3D: %d\n", ret);
      goto fail;
   }

   /* This over-allocates by a page. The GP, which would execute at the end of
    * the last page, would trigger faults. The going theory is that it
    * prefetches up to a certain amount.
    */
   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 1 << 16,
                        (3 << NV50_CODE_BO_SIZE_LOG2) + 0x1000,
                        NULL, &screen->code);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate code bo: %d\n", ret);
      goto fail;
   }

   nouveau_heap_init(&screen->vp_code_heap, 0, 1 << NV50_CODE_BO_SIZE_LOG2);
   nouveau_heap_init(&screen->gp_code_heap, 0, 1 << NV50_CODE_BO_SIZE_LOG2);
   nouveau_heap_init(&screen->fp_code_heap, 0, 1 << NV50_CODE_BO_SIZE_LOG2);

   nouveau_getparam(dev, NOUVEAU_GETPARAM_GRAPH_UNITS, &value);

   screen->TPs = util_bitcount(value & 0xffff);
   screen->MPsInTP = util_bitcount(value & 0x0f000000);

   screen->mp_count = screen->TPs * screen->MPsInTP;

   stack_size = util_next_power_of_two(screen->TPs) * screen->MPsInTP *
         STACK_WARPS_ALLOC * 64 * 8;

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 1 << 16, stack_size, NULL,
                        &screen->stack_bo);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate stack bo: %d\n", ret);
      goto fail;
   }

   uint64_t size_of_one_temp = util_next_power_of_two(screen->TPs) *
         screen->MPsInTP * LOCAL_WARPS_ALLOC * THREADS_IN_WARP *
         ONE_TEMP_SIZE;
   screen->max_tls_space = dev->vram_size / size_of_one_temp * ONE_TEMP_SIZE;
   screen->max_tls_space /= 2; /* half of vram */

   /* hw can address max 64 KiB */
   screen->max_tls_space = MIN2(screen->max_tls_space, 64 << 10);

   uint64_t tls_size;
   unsigned tls_space = 4/*temps*/ * ONE_TEMP_SIZE;
   ret = nv50_tls_alloc(screen, tls_space, &tls_size);
   if (ret)
      goto fail;

   if (nouveau_mesa_debug)
      debug_printf("TPs = %u, MPsInTP = %u, VRAM = %"PRIu64" MiB, tls_size = %"PRIu64" KiB\n",
            screen->TPs, screen->MPsInTP, dev->vram_size >> 20, tls_size >> 10);

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 1 << 16, 4 << 16, NULL,
                        &screen->uniforms);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate uniforms bo: %d\n", ret);
      goto fail;
   }

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 1 << 16, 3 << 16, NULL,
                        &screen->txc);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate TIC/TSC bo: %d\n", ret);
      goto fail;
   }

   screen->tic.entries = CALLOC(4096, sizeof(void *));
   screen->tsc.entries = screen->tic.entries + 2048;

   if (!nv50_blitter_create(screen))
      goto fail;

   nv50_screen_init_hwctx(screen);

   ret = nv50_screen_compute_setup(screen, screen->base.pushbuf);
   if (ret) {
      NOUVEAU_ERR("Failed to init compute context: %d\n", ret);
      goto fail;
   }

   nouveau_fence_new(&screen->base, &screen->base.fence.current);

   return &screen->base;

fail:
   screen->base.base.context_create = NULL;
   return &screen->base;
}

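/* Allocate a TIC (texture image control) slot for a sampler view. Slots are
 * handed out round-robin, skipping entries that are currently locked; the
 * previous occupant of the chosen slot, if any, is invalidated (id = -1).
 * nv50_screen_tsc_alloc() does the same for sampler state (TSC) entries.
 */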
int
nv50_screen_tic_alloc(struct nv50_screen *screen, void *entry)
{
   int i = screen->tic.next;

   while (screen->tic.lock[i / 32] & (1 << (i % 32)))
      i = (i + 1) & (NV50_TIC_MAX_ENTRIES - 1);

   screen->tic.next = (i + 1) & (NV50_TIC_MAX_ENTRIES - 1);

   if (screen->tic.entries[i])
      nv50_tic_entry(screen->tic.entries[i])->id = -1;

   screen->tic.entries[i] = entry;
   return i;
}

int
nv50_screen_tsc_alloc(struct nv50_screen *screen, void *entry)
{
   int i = screen->tsc.next;

   while (screen->tsc.lock[i / 32] & (1 << (i % 32)))
      i = (i + 1) & (NV50_TSC_MAX_ENTRIES - 1);

   screen->tsc.next = (i + 1) & (NV50_TSC_MAX_ENTRIES - 1);

   if (screen->tsc.entries[i])
      nv50_tsc_entry(screen->tsc.entries[i])->id = -1;

   screen->tsc.entries[i] = entry;
   return i;
}