gallium: add separate PIPE_CAP_INT64_DIVMOD
[mesa.git] / src/gallium/drivers/nouveau/nv50/nv50_screen.c
/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <xf86drm.h>
#include <nouveau_drm.h>
#include "util/u_format.h"
#include "util/u_format_s3tc.h"
#include "pipe/p_screen.h"

#include "nv50/nv50_context.h"
#include "nv50/nv50_screen.h"

#include "nouveau_vp3_video.h"

#include "nv_object.xml.h"

/* affected by LOCAL_WARPS_LOG_ALLOC / LOCAL_WARPS_NO_CLAMP */
#define LOCAL_WARPS_ALLOC 32
/* affected by STACK_WARPS_LOG_ALLOC / STACK_WARPS_NO_CLAMP */
#define STACK_WARPS_ALLOC 32

#define THREADS_IN_WARP 32
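
/* LOCAL_WARPS_ALLOC and THREADS_IN_WARP scale the per-thread local (TLS)
 * buffer sized in nv50_tls_alloc(); STACK_WARPS_ALLOC scales the stack
 * buffer sized in nv50_screen_create() below.
 */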

static boolean
nv50_screen_is_format_supported(struct pipe_screen *pscreen,
                                enum pipe_format format,
                                enum pipe_texture_target target,
                                unsigned sample_count,
                                unsigned bindings)
{
   if (sample_count > 8)
      return false;
   if (!(0x117 & (1 << sample_count))) /* 0, 1, 2, 4 or 8 */
      return false;
   if (sample_count == 8 && util_format_get_blocksizebits(format) >= 128)
      return false;

   if (!util_format_is_supported(format, bindings))
      return false;

   switch (format) {
   case PIPE_FORMAT_Z16_UNORM:
      if (nv50_screen(pscreen)->tesla->oclass < NVA0_3D_CLASS)
         return false;
      break;
   default:
      break;
   }

   if (bindings & PIPE_BIND_LINEAR)
      if (util_format_is_depth_or_stencil(format) ||
          (target != PIPE_TEXTURE_1D &&
           target != PIPE_TEXTURE_2D &&
           target != PIPE_TEXTURE_RECT) ||
          sample_count > 1)
         return false;

   /* shared is always supported */
   bindings &= ~(PIPE_BIND_LINEAR |
                 PIPE_BIND_SHARED);

   return (( nv50_format_table[format].usage |
             nv50_vertex_format[format].usage) & bindings) == bindings;
}

static int
nv50_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
{
   const uint16_t class_3d = nouveau_screen(pscreen)->class_3d;
   struct nouveau_device *dev = nouveau_screen(pscreen)->device;

   switch (param) {
   /* non-boolean caps */
   case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
      return 14;
   case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
      return 12;
   case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
      return 14;
   case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
      return 512;
   case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
   case PIPE_CAP_MIN_TEXEL_OFFSET:
      return -8;
   case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
   case PIPE_CAP_MAX_TEXEL_OFFSET:
      return 7;
   case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
      return 128 * 1024 * 1024;
   case PIPE_CAP_GLSL_FEATURE_LEVEL:
      return 330;
   case PIPE_CAP_MAX_RENDER_TARGETS:
      return 8;
   case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
      return 1;
   case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
      return 4;
   case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
   case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
      return 64;
   case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
   case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
      return 1024;
   case PIPE_CAP_MAX_VERTEX_STREAMS:
      return 1;
   case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
      return 2048;
   case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
      return 256;
   case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
      return 16; /* 256 for binding as RT, but that's not possible in GL */
   case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
      return NOUVEAU_MIN_BUFFER_MAP_ALIGN;
   case PIPE_CAP_MAX_VIEWPORTS:
      return NV50_MAX_VIEWPORTS;
   case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
      return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_NV50;
   case PIPE_CAP_ENDIANNESS:
      return PIPE_ENDIAN_LITTLE;
   case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
      return (class_3d >= NVA3_3D_CLASS) ? 4 : 0;
   case PIPE_CAP_MAX_WINDOW_RECTANGLES:
      return NV50_MAX_WINDOW_RECTANGLES;

   /* supported caps */
   case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
   case PIPE_CAP_TEXTURE_SWIZZLE:
   case PIPE_CAP_TEXTURE_SHADOW_MAP:
   case PIPE_CAP_NPOT_TEXTURES:
   case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
   case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
   case PIPE_CAP_ANISOTROPIC_FILTER:
   case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
   case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
   case PIPE_CAP_TWO_SIDED_STENCIL:
   case PIPE_CAP_DEPTH_CLIP_DISABLE:
   case PIPE_CAP_POINT_SPRITE:
   case PIPE_CAP_SM3:
   case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
   case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
   case PIPE_CAP_VERTEX_COLOR_CLAMPED:
   case PIPE_CAP_QUERY_TIMESTAMP:
   case PIPE_CAP_QUERY_TIME_ELAPSED:
   case PIPE_CAP_OCCLUSION_QUERY:
   case PIPE_CAP_BLEND_EQUATION_SEPARATE:
   case PIPE_CAP_INDEP_BLEND_ENABLE:
   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
   case PIPE_CAP_PRIMITIVE_RESTART:
   case PIPE_CAP_TGSI_INSTANCEID:
   case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
   case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
   case PIPE_CAP_CONDITIONAL_RENDER:
   case PIPE_CAP_TEXTURE_BARRIER:
   case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
   case PIPE_CAP_START_INSTANCE:
   case PIPE_CAP_USER_CONSTANT_BUFFERS:
   case PIPE_CAP_USER_INDEX_BUFFERS:
   case PIPE_CAP_USER_VERTEX_BUFFERS:
   case PIPE_CAP_TEXTURE_MULTISAMPLE:
   case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
   case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE:
   case PIPE_CAP_SAMPLER_VIEW_TARGET:
   case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
   case PIPE_CAP_CLIP_HALFZ:
   case PIPE_CAP_POLYGON_OFFSET_CLAMP:
   case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
   case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
   case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
   case PIPE_CAP_DEPTH_BOUNDS_TEST:
   case PIPE_CAP_TGSI_TXQS:
   case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
   case PIPE_CAP_SHAREABLE_SHADERS:
   case PIPE_CAP_CLEAR_TEXTURE:
   case PIPE_CAP_COMPUTE:
   case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL:
   case PIPE_CAP_INVALIDATE_BUFFER:
   case PIPE_CAP_STRING_MARKER:
   case PIPE_CAP_CULL_DISTANCE:
   case PIPE_CAP_TGSI_ARRAY_COMPONENTS:
   case PIPE_CAP_TGSI_MUL_ZERO_WINS:
      return 1;
   case PIPE_CAP_SEAMLESS_CUBE_MAP:
      return 1; /* class_3d >= NVA0_3D_CLASS; */
   /* supported on nva0+ */
   case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
      return class_3d >= NVA0_3D_CLASS;
   /* supported on nva3+ */
   case PIPE_CAP_CUBE_MAP_ARRAY:
   case PIPE_CAP_INDEP_BLEND_FUNC:
   case PIPE_CAP_TEXTURE_QUERY_LOD:
   case PIPE_CAP_SAMPLE_SHADING:
   case PIPE_CAP_FORCE_PERSAMPLE_INTERP:
      return class_3d >= NVA3_3D_CLASS;

   /* unsupported caps */
   case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
   case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
   case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
   case PIPE_CAP_SHADER_STENCIL_EXPORT:
   case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
   case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
   case PIPE_CAP_TGSI_TEXCOORD:
   case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT:
   case PIPE_CAP_TEXTURE_GATHER_SM5:
   case PIPE_CAP_FAKE_SW_MSAA:
   case PIPE_CAP_TEXTURE_GATHER_OFFSETS:
   case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
   case PIPE_CAP_DRAW_INDIRECT:
   case PIPE_CAP_MULTI_DRAW_INDIRECT:
   case PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS:
   case PIPE_CAP_VERTEXID_NOBASE:
   case PIPE_CAP_MULTISAMPLE_Z_RESOLVE: /* potentially supported on some hw */
   case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
   case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
   case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
   case PIPE_CAP_DRAW_PARAMETERS:
   case PIPE_CAP_TGSI_PACK_HALF_FLOAT:
   case PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL:
   case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
   case PIPE_CAP_GENERATE_MIPMAP:
   case PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY:
   case PIPE_CAP_SURFACE_REINTERPRET_BLOCKS:
   case PIPE_CAP_QUERY_BUFFER_OBJECT:
   case PIPE_CAP_QUERY_MEMORY_INFO:
   case PIPE_CAP_PCI_GROUP:
   case PIPE_CAP_PCI_BUS:
   case PIPE_CAP_PCI_DEVICE:
   case PIPE_CAP_PCI_FUNCTION:
   case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
   case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
   case PIPE_CAP_PRIMITIVE_RESTART_FOR_PATCHES:
   case PIPE_CAP_TGSI_VOTE:
   case PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED:
   case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
   case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
   case PIPE_CAP_TGSI_CAN_READ_OUTPUTS:
   case PIPE_CAP_NATIVE_FENCE_FD:
   case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
   case PIPE_CAP_TGSI_FS_FBFETCH:
   case PIPE_CAP_DOUBLES:
   case PIPE_CAP_INT64:
   case PIPE_CAP_INT64_DIVMOD:
      return 0;
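   /* Note: PIPE_CAP_INT64_DIVMOD is reported as a separate cap from
    * PIPE_CAP_INT64 (per the commit subject); nv50 advertises neither.
    */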

   case PIPE_CAP_VENDOR_ID:
      return 0x10de;
   case PIPE_CAP_DEVICE_ID: {
      uint64_t device_id;
      if (nouveau_getparam(dev, NOUVEAU_GETPARAM_PCI_DEVICE, &device_id)) {
         NOUVEAU_ERR("NOUVEAU_GETPARAM_PCI_DEVICE failed.\n");
         return -1;
      }
      return device_id;
   }
   case PIPE_CAP_ACCELERATED:
      return 1;
   case PIPE_CAP_VIDEO_MEMORY:
      return dev->vram_size >> 20;
   case PIPE_CAP_UMA:
      return 0;
   }

   NOUVEAU_ERR("unknown PIPE_CAP %d\n", param);
   return 0;
}

static int
nv50_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader,
                             enum pipe_shader_cap param)
{
   switch (shader) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_GEOMETRY:
   case PIPE_SHADER_FRAGMENT:
      break;
   case PIPE_SHADER_COMPUTE:
   default:
      return 0;
   }

   switch (param) {
   case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
   case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
      return 16384;
   case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
      return 4;
   case PIPE_SHADER_CAP_MAX_INPUTS:
      if (shader == PIPE_SHADER_VERTEX)
         return 32;
      return 15;
   case PIPE_SHADER_CAP_MAX_OUTPUTS:
      return 16;
   case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
      return 65536;
   case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
      return NV50_MAX_PIPE_CONSTBUFS;
   case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
      return shader != PIPE_SHADER_FRAGMENT;
   case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
   case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
   case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
      return 1;
   case PIPE_SHADER_CAP_MAX_PREDS:
      return 0;
   case PIPE_SHADER_CAP_MAX_TEMPS:
      return nv50_screen(pscreen)->max_tls_space / ONE_TEMP_SIZE;
   case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
      return 1;
   case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
      return 1;
   case PIPE_SHADER_CAP_SUBROUTINES:
      return 0; /* please inline, or provide function declarations */
   case PIPE_SHADER_CAP_INTEGERS:
      return 1;
   case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
      /* The chip could handle more sampler views than samplers */
   case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
      return MIN2(16, PIPE_MAX_SAMPLERS);
   case PIPE_SHADER_CAP_PREFERRED_IR:
      return PIPE_SHADER_IR_TGSI;
   case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
      return 32;
   case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED:
   case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED:
   case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED:
   case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
   case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
   case PIPE_SHADER_CAP_SUPPORTED_IRS:
   case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
   case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
      return 0;
   default:
      NOUVEAU_ERR("unknown PIPE_SHADER_CAP %d\n", param);
      return 0;
   }
}

static float
nv50_screen_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
{
   switch (param) {
   case PIPE_CAPF_MAX_LINE_WIDTH:
   case PIPE_CAPF_MAX_LINE_WIDTH_AA:
      return 10.0f;
   case PIPE_CAPF_MAX_POINT_WIDTH:
   case PIPE_CAPF_MAX_POINT_WIDTH_AA:
      return 64.0f;
   case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
      return 16.0f;
   case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
      return 4.0f;
   case PIPE_CAPF_GUARD_BAND_LEFT:
   case PIPE_CAPF_GUARD_BAND_TOP:
      return 0.0f;
   case PIPE_CAPF_GUARD_BAND_RIGHT:
   case PIPE_CAPF_GUARD_BAND_BOTTOM:
      return 0.0f; /* that or infinity */
   }

   NOUVEAU_ERR("unknown PIPE_CAPF %d\n", param);
   return 0.0f;
}

static int
nv50_screen_get_compute_param(struct pipe_screen *pscreen,
                              enum pipe_shader_ir ir_type,
                              enum pipe_compute_cap param, void *data)
{
   struct nv50_screen *screen = nv50_screen(pscreen);

#define RET(x) do {                  \
   if (data)                         \
      memcpy(data, x, sizeof(x));    \
   return sizeof(x);                 \
} while (0)
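/* If data is NULL, RET() skips the copy and only reports the size the caller
 * must provide; the compound-literal arrays below make sizeof(x) evaluate to
 * the payload size in bytes.
 */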

   switch (param) {
   case PIPE_COMPUTE_CAP_GRID_DIMENSION:
      RET((uint64_t []) { 2 });
   case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
      RET(((uint64_t []) { 65535, 65535 }));
   case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
      RET(((uint64_t []) { 512, 512, 64 }));
   case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
      RET((uint64_t []) { 512 });
   case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE: /* g0-15[] */
      RET((uint64_t []) { 1ULL << 32 });
   case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE: /* s[] */
      RET((uint64_t []) { 16 << 10 });
   case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE: /* l[] */
      RET((uint64_t []) { 16 << 10 });
   case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE: /* c[], arbitrary limit */
      RET((uint64_t []) { 4096 });
   case PIPE_COMPUTE_CAP_SUBGROUP_SIZE:
      RET((uint32_t []) { 32 });
   case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
      RET((uint64_t []) { 1ULL << 40 });
   case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
      RET((uint32_t []) { 0 });
   case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
      RET((uint32_t []) { screen->mp_count });
   case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
      RET((uint32_t []) { 512 }); /* FIXME: arbitrary limit */
   case PIPE_COMPUTE_CAP_ADDRESS_BITS:
      RET((uint32_t []) { 32 });
   case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
      RET((uint64_t []) { 0 });
   default:
      return 0;
   }

#undef RET
}

static void
nv50_screen_destroy(struct pipe_screen *pscreen)
{
   struct nv50_screen *screen = nv50_screen(pscreen);

   if (!nouveau_drm_screen_unref(&screen->base))
      return;

   if (screen->base.fence.current) {
      struct nouveau_fence *current = NULL;

      /* nouveau_fence_wait will create a new current fence, so wait on the
       * _current_ one, and remove both.
       */
      nouveau_fence_ref(screen->base.fence.current, &current);
      nouveau_fence_wait(current, NULL);
      nouveau_fence_ref(NULL, &current);
      nouveau_fence_ref(NULL, &screen->base.fence.current);
   }
   if (screen->base.pushbuf)
      screen->base.pushbuf->user_priv = NULL;

   if (screen->blitter)
      nv50_blitter_destroy(screen);
   if (screen->pm.prog) {
      screen->pm.prog->code = NULL; /* hardcoded, don't FREE */
      nv50_program_destroy(NULL, screen->pm.prog);
      FREE(screen->pm.prog);
   }

   nouveau_bo_ref(NULL, &screen->code);
   nouveau_bo_ref(NULL, &screen->tls_bo);
   nouveau_bo_ref(NULL, &screen->stack_bo);
   nouveau_bo_ref(NULL, &screen->txc);
   nouveau_bo_ref(NULL, &screen->uniforms);
   nouveau_bo_ref(NULL, &screen->fence.bo);

   nouveau_heap_destroy(&screen->vp_code_heap);
   nouveau_heap_destroy(&screen->gp_code_heap);
   nouveau_heap_destroy(&screen->fp_code_heap);

   FREE(screen->tic.entries);

   nouveau_object_del(&screen->tesla);
   nouveau_object_del(&screen->eng2d);
   nouveau_object_del(&screen->m2mf);
   nouveau_object_del(&screen->compute);
   nouveau_object_del(&screen->sync);

   nouveau_screen_fini(&screen->base);

   FREE(screen);
}

static void
nv50_screen_fence_emit(struct pipe_screen *pscreen, u32 *sequence)
{
   struct nv50_screen *screen = nv50_screen(pscreen);
   struct nouveau_pushbuf *push = screen->base.pushbuf;

   /* we need to do it after possible flush in MARK_RING */
   *sequence = ++screen->base.fence.sequence;

   assert(PUSH_AVAIL(push) + push->rsvd_kick >= 5);
   PUSH_DATA (push, NV50_FIFO_PKHDR(NV50_3D(QUERY_ADDRESS_HIGH), 4));
   PUSH_DATAh(push, screen->fence.bo->offset);
   PUSH_DATA (push, screen->fence.bo->offset);
   PUSH_DATA (push, *sequence);
   PUSH_DATA (push, NV50_3D_QUERY_GET_MODE_WRITE_UNK0 |
              NV50_3D_QUERY_GET_UNK4 |
              NV50_3D_QUERY_GET_UNIT_CROP |
              NV50_3D_QUERY_GET_TYPE_QUERY |
              NV50_3D_QUERY_GET_QUERY_SELECT_ZERO |
              NV50_3D_QUERY_GET_SHORT);
}
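
/* The QUERY_GET emitted above makes the GPU write the 32-bit sequence number
 * to the start of fence.bo once it reaches that point in the command stream;
 * nv50_screen_fence_update() simply reads it back through the CPU mapping.
 */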

static u32
nv50_screen_fence_update(struct pipe_screen *pscreen)
{
   return nv50_screen(pscreen)->fence.map[0];
}

static void
nv50_screen_init_hwctx(struct nv50_screen *screen)
{
   struct nouveau_pushbuf *push = screen->base.pushbuf;
   struct nv04_fifo *fifo;
   unsigned i;

   fifo = (struct nv04_fifo *)screen->base.channel->data;

   BEGIN_NV04(push, SUBC_M2MF(NV01_SUBCHAN_OBJECT), 1);
   PUSH_DATA (push, screen->m2mf->handle);
   BEGIN_NV04(push, SUBC_M2MF(NV03_M2MF_DMA_NOTIFY), 3);
   PUSH_DATA (push, screen->sync->handle);
   PUSH_DATA (push, fifo->vram);
   PUSH_DATA (push, fifo->vram);

   BEGIN_NV04(push, SUBC_2D(NV01_SUBCHAN_OBJECT), 1);
   PUSH_DATA (push, screen->eng2d->handle);
   BEGIN_NV04(push, NV50_2D(DMA_NOTIFY), 4);
   PUSH_DATA (push, screen->sync->handle);
   PUSH_DATA (push, fifo->vram);
   PUSH_DATA (push, fifo->vram);
   PUSH_DATA (push, fifo->vram);
   BEGIN_NV04(push, NV50_2D(OPERATION), 1);
   PUSH_DATA (push, NV50_2D_OPERATION_SRCCOPY);
   BEGIN_NV04(push, NV50_2D(CLIP_ENABLE), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_2D(COLOR_KEY_ENABLE), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, SUBC_2D(0x0888), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_2D(COND_MODE), 1);
   PUSH_DATA (push, NV50_2D_COND_MODE_ALWAYS);

   BEGIN_NV04(push, SUBC_3D(NV01_SUBCHAN_OBJECT), 1);
   PUSH_DATA (push, screen->tesla->handle);

   BEGIN_NV04(push, NV50_3D(COND_MODE), 1);
   PUSH_DATA (push, NV50_3D_COND_MODE_ALWAYS);

   BEGIN_NV04(push, NV50_3D(DMA_NOTIFY), 1);
   PUSH_DATA (push, screen->sync->handle);
   BEGIN_NV04(push, NV50_3D(DMA_ZETA), 11);
   for (i = 0; i < 11; ++i)
      PUSH_DATA(push, fifo->vram);
   BEGIN_NV04(push, NV50_3D(DMA_COLOR(0)), NV50_3D_DMA_COLOR__LEN);
   for (i = 0; i < NV50_3D_DMA_COLOR__LEN; ++i)
      PUSH_DATA(push, fifo->vram);

   BEGIN_NV04(push, NV50_3D(REG_MODE), 1);
   PUSH_DATA (push, NV50_3D_REG_MODE_STRIPED);
   BEGIN_NV04(push, NV50_3D(UNK1400_LANES), 1);
   PUSH_DATA (push, 0xf);

   if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", true)) {
      BEGIN_NV04(push, NV50_3D(WATCHDOG_TIMER), 1);
      PUSH_DATA (push, 0x18);
   }

   BEGIN_NV04(push, NV50_3D(ZETA_COMP_ENABLE), 1);
   PUSH_DATA(push, screen->base.drm->version >= 0x01000101);

   BEGIN_NV04(push, NV50_3D(RT_COMP_ENABLE(0)), 8);
   for (i = 0; i < 8; ++i)
      PUSH_DATA(push, screen->base.drm->version >= 0x01000101);

   BEGIN_NV04(push, NV50_3D(RT_CONTROL), 1);
   PUSH_DATA (push, 1);

   BEGIN_NV04(push, NV50_3D(CSAA_ENABLE), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(MULTISAMPLE_ENABLE), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(MULTISAMPLE_MODE), 1);
   PUSH_DATA (push, NV50_3D_MULTISAMPLE_MODE_MS1);
   BEGIN_NV04(push, NV50_3D(MULTISAMPLE_CTRL), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(PRIM_RESTART_WITH_DRAW_ARRAYS), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(BLEND_SEPARATE_ALPHA), 1);
   PUSH_DATA (push, 1);

   if (screen->tesla->oclass >= NVA0_3D_CLASS) {
      BEGIN_NV04(push, SUBC_3D(NVA0_3D_TEX_MISC), 1);
      PUSH_DATA (push, 0);
   }

   BEGIN_NV04(push, NV50_3D(SCREEN_Y_CONTROL), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(WINDOW_OFFSET_X), 2);
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(ZCULL_REGION), 1);
   PUSH_DATA (push, 0x3f);

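   /* The single code BO is carved into three equal segments, matching the
    * vp/fp/gp code heaps set up in nv50_screen_create(): vertex programs at
    * offset 0, fragment programs at 1 << NV50_CODE_BO_SIZE_LOG2 and geometry
    * programs at 2 << NV50_CODE_BO_SIZE_LOG2.
    */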
   BEGIN_NV04(push, NV50_3D(VP_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->code->offset + (0 << NV50_CODE_BO_SIZE_LOG2));
   PUSH_DATA (push, screen->code->offset + (0 << NV50_CODE_BO_SIZE_LOG2));

   BEGIN_NV04(push, NV50_3D(FP_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->code->offset + (1 << NV50_CODE_BO_SIZE_LOG2));
   PUSH_DATA (push, screen->code->offset + (1 << NV50_CODE_BO_SIZE_LOG2));

   BEGIN_NV04(push, NV50_3D(GP_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->code->offset + (2 << NV50_CODE_BO_SIZE_LOG2));
   PUSH_DATA (push, screen->code->offset + (2 << NV50_CODE_BO_SIZE_LOG2));

   BEGIN_NV04(push, NV50_3D(LOCAL_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->tls_bo->offset);
   PUSH_DATA (push, screen->tls_bo->offset);
   PUSH_DATA (push, util_logbase2(screen->cur_tls_space / 8));

   BEGIN_NV04(push, NV50_3D(STACK_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->stack_bo->offset);
   PUSH_DATA (push, screen->stack_bo->offset);
   PUSH_DATA (push, 4);

   BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->uniforms->offset + (0 << 16));
   PUSH_DATA (push, screen->uniforms->offset + (0 << 16));
   PUSH_DATA (push, (NV50_CB_PVP << 16) | 0x0000);

   BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->uniforms->offset + (1 << 16));
   PUSH_DATA (push, screen->uniforms->offset + (1 << 16));
   PUSH_DATA (push, (NV50_CB_PGP << 16) | 0x0000);

   BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->uniforms->offset + (2 << 16));
   PUSH_DATA (push, screen->uniforms->offset + (2 << 16));
   PUSH_DATA (push, (NV50_CB_PFP << 16) | 0x0000);

   BEGIN_NV04(push, NV50_3D(CB_DEF_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->uniforms->offset + (3 << 16));
   PUSH_DATA (push, screen->uniforms->offset + (3 << 16));
   PUSH_DATA (push, (NV50_CB_AUX << 16) | (NV50_CB_AUX_SIZE & 0xffff));

   BEGIN_NI04(push, NV50_3D(SET_PROGRAM_CB), 3);
   PUSH_DATA (push, (NV50_CB_AUX << 12) | 0xf01);
   PUSH_DATA (push, (NV50_CB_AUX << 12) | 0xf21);
   PUSH_DATA (push, (NV50_CB_AUX << 12) | 0xf31);

   /* return { 0.0, 0.0, 0.0, 0.0 } on out-of-bounds vtxbuf access */
   BEGIN_NV04(push, NV50_3D(CB_ADDR), 1);
   PUSH_DATA (push, (NV50_CB_AUX_RUNOUT_OFFSET << (8 - 2)) | NV50_CB_AUX);
   BEGIN_NI04(push, NV50_3D(CB_DATA(0)), 4);
   PUSH_DATAf(push, 0.0f);
   PUSH_DATAf(push, 0.0f);
   PUSH_DATAf(push, 0.0f);
   PUSH_DATAf(push, 0.0f);
   BEGIN_NV04(push, NV50_3D(VERTEX_RUNOUT_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->uniforms->offset + (3 << 16) + NV50_CB_AUX_RUNOUT_OFFSET);
   PUSH_DATA (push, screen->uniforms->offset + (3 << 16) + NV50_CB_AUX_RUNOUT_OFFSET);

   nv50_upload_ms_info(push);

   /* max TIC (bits 4:8) & TSC bindings, per program type */
   for (i = 0; i < 3; ++i) {
      BEGIN_NV04(push, NV50_3D(TEX_LIMITS(i)), 1);
      PUSH_DATA (push, 0x54);
   }

   BEGIN_NV04(push, NV50_3D(TIC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset);
   PUSH_DATA (push, screen->txc->offset);
   PUSH_DATA (push, NV50_TIC_MAX_ENTRIES - 1);

   BEGIN_NV04(push, NV50_3D(TSC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset + 65536);
   PUSH_DATA (push, screen->txc->offset + 65536);
   PUSH_DATA (push, NV50_TSC_MAX_ENTRIES - 1);
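   /* The txc BO thus holds the TIC (texture image control) table at offset 0
    * and the TSC (sampler control) table at offset 64 KiB; the tic/tsc slot
    * allocators at the bottom of this file hand out entries in these tables.
    */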

   BEGIN_NV04(push, NV50_3D(LINKED_TSC), 1);
   PUSH_DATA (push, 0);

   BEGIN_NV04(push, NV50_3D(CLIP_RECTS_EN), 1);
   PUSH_DATA (push, 0);
   BEGIN_NV04(push, NV50_3D(CLIP_RECTS_MODE), 1);
   PUSH_DATA (push, NV50_3D_CLIP_RECTS_MODE_INSIDE_ANY);
   BEGIN_NV04(push, NV50_3D(CLIP_RECT_HORIZ(0)), 8 * 2);
   for (i = 0; i < 8 * 2; ++i)
      PUSH_DATA(push, 0);
   BEGIN_NV04(push, NV50_3D(CLIPID_ENABLE), 1);
   PUSH_DATA (push, 0);

   BEGIN_NV04(push, NV50_3D(VIEWPORT_TRANSFORM_EN), 1);
   PUSH_DATA (push, 1);
   for (i = 0; i < NV50_MAX_VIEWPORTS; i++) {
      BEGIN_NV04(push, NV50_3D(DEPTH_RANGE_NEAR(i)), 2);
      PUSH_DATAf(push, 0.0f);
      PUSH_DATAf(push, 1.0f);
      BEGIN_NV04(push, NV50_3D(VIEWPORT_HORIZ(i)), 2);
      PUSH_DATA (push, 8192 << 16);
      PUSH_DATA (push, 8192 << 16);
   }

   BEGIN_NV04(push, NV50_3D(VIEW_VOLUME_CLIP_CTRL), 1);
#ifdef NV50_SCISSORS_CLIPPING
   PUSH_DATA (push, 0x0000);
#else
   PUSH_DATA (push, 0x1080);
#endif

   BEGIN_NV04(push, NV50_3D(CLEAR_FLAGS), 1);
   PUSH_DATA (push, NV50_3D_CLEAR_FLAGS_CLEAR_RECT_VIEWPORT);

   /* We use scissors instead of exact view volume clipping,
    * so they're always enabled.
    */
   for (i = 0; i < NV50_MAX_VIEWPORTS; i++) {
      BEGIN_NV04(push, NV50_3D(SCISSOR_ENABLE(i)), 3);
      PUSH_DATA (push, 1);
      PUSH_DATA (push, 8192 << 16);
      PUSH_DATA (push, 8192 << 16);
   }

   BEGIN_NV04(push, NV50_3D(RASTERIZE_ENABLE), 1);
   PUSH_DATA (push, 1);
   BEGIN_NV04(push, NV50_3D(POINT_RASTER_RULES), 1);
   PUSH_DATA (push, NV50_3D_POINT_RASTER_RULES_OGL);
   BEGIN_NV04(push, NV50_3D(FRAG_COLOR_CLAMP_EN), 1);
   PUSH_DATA (push, 0x11111111);
   BEGIN_NV04(push, NV50_3D(EDGEFLAG), 1);
   PUSH_DATA (push, 1);

   BEGIN_NV04(push, NV50_3D(VB_ELEMENT_BASE), 1);
   PUSH_DATA (push, 0);
   if (screen->base.class_3d >= NV84_3D_CLASS) {
      BEGIN_NV04(push, NV84_3D(VERTEX_ID_BASE), 1);
      PUSH_DATA (push, 0);
   }

   PUSH_KICK (push);
}

static int nv50_tls_alloc(struct nv50_screen *screen, unsigned tls_space,
                          uint64_t *tls_size)
{
   struct nouveau_device *dev = screen->base.device;
   int ret;

   screen->cur_tls_space = util_next_power_of_two(tls_space / ONE_TEMP_SIZE) *
                           ONE_TEMP_SIZE;
   if (nouveau_mesa_debug)
      debug_printf("allocating space for %u temps\n",
                   util_next_power_of_two(tls_space / ONE_TEMP_SIZE));
   *tls_size = screen->cur_tls_space * util_next_power_of_two(screen->TPs) *
               screen->MPsInTP * LOCAL_WARPS_ALLOC * THREADS_IN_WARP;
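   /* Illustrative numbers: with e.g. 16 TPs, 4 MPs per TP and cur_tls_space
    * rounded to 64 bytes, this reserves 64 * 16 * 4 * 32 * 32 = 4 MiB of
    * VRAM -- one slot for every thread that could be in flight.
    */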

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 1 << 16,
                        *tls_size, NULL, &screen->tls_bo);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate local bo: %d\n", ret);
      return ret;
   }

   return 0;
}

int nv50_tls_realloc(struct nv50_screen *screen, unsigned tls_space)
{
   struct nouveau_pushbuf *push = screen->base.pushbuf;
   int ret;
   uint64_t tls_size;

   if (tls_space < screen->cur_tls_space)
      return 0;
   if (tls_space > screen->max_tls_space) {
      /* fixable by limiting number of warps (LOCAL_WARPS_LOG_ALLOC /
       * LOCAL_WARPS_NO_CLAMP) */
      NOUVEAU_ERR("Unsupported number of temporaries (%u > %u). Fixable if someone cares.\n",
                  (unsigned)(tls_space / ONE_TEMP_SIZE),
                  (unsigned)(screen->max_tls_space / ONE_TEMP_SIZE));
      return -ENOMEM;
   }

   nouveau_bo_ref(NULL, &screen->tls_bo);
   ret = nv50_tls_alloc(screen, tls_space, &tls_size);
   if (ret)
      return ret;

   BEGIN_NV04(push, NV50_3D(LOCAL_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->tls_bo->offset);
   PUSH_DATA (push, screen->tls_bo->offset);
   PUSH_DATA (push, util_logbase2(screen->cur_tls_space / 8));

   return 1;
}

struct nouveau_screen *
nv50_screen_create(struct nouveau_device *dev)
{
   struct nv50_screen *screen;
   struct pipe_screen *pscreen;
   struct nouveau_object *chan;
   uint64_t value;
   uint32_t tesla_class;
   unsigned stack_size;
   int ret;

   screen = CALLOC_STRUCT(nv50_screen);
   if (!screen)
      return NULL;
   pscreen = &screen->base.base;
   pscreen->destroy = nv50_screen_destroy;

   ret = nouveau_screen_init(&screen->base, dev);
   if (ret) {
      NOUVEAU_ERR("nouveau_screen_init failed: %d\n", ret);
      goto fail;
   }

   /* TODO: Prevent FIFO prefetch before transfer of index buffers and
    * admit them to VRAM.
    */
   screen->base.vidmem_bindings |= PIPE_BIND_CONSTANT_BUFFER |
      PIPE_BIND_VERTEX_BUFFER;
   screen->base.sysmem_bindings |=
      PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER;

   screen->base.pushbuf->user_priv = screen;
   screen->base.pushbuf->rsvd_kick = 5;

   chan = screen->base.channel;

   pscreen->context_create = nv50_create;
   pscreen->is_format_supported = nv50_screen_is_format_supported;
   pscreen->get_param = nv50_screen_get_param;
   pscreen->get_shader_param = nv50_screen_get_shader_param;
   pscreen->get_paramf = nv50_screen_get_paramf;
   pscreen->get_compute_param = nv50_screen_get_compute_param;
   pscreen->get_driver_query_info = nv50_screen_get_driver_query_info;
   pscreen->get_driver_query_group_info = nv50_screen_get_driver_query_group_info;

   nv50_screen_init_resource_functions(pscreen);

   if (screen->base.device->chipset < 0x84 ||
       debug_get_bool_option("NOUVEAU_PMPEG", false)) {
      /* PMPEG */
      nouveau_screen_init_vdec(&screen->base);
   } else if (screen->base.device->chipset < 0x98 ||
              screen->base.device->chipset == 0xa0) {
      /* VP2 */
      screen->base.base.get_video_param = nv84_screen_get_video_param;
      screen->base.base.is_video_format_supported = nv84_screen_video_supported;
   } else {
      /* VP3/4 */
      screen->base.base.get_video_param = nouveau_vp3_screen_get_video_param;
      screen->base.base.is_video_format_supported = nouveau_vp3_screen_video_supported;
   }

   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0, 4096,
                        NULL, &screen->fence.bo);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate fence bo: %d\n", ret);
      goto fail;
   }

   nouveau_bo_map(screen->fence.bo, 0, NULL);
   screen->fence.map = screen->fence.bo->map;
   screen->base.fence.emit = nv50_screen_fence_emit;
   screen->base.fence.update = nv50_screen_fence_update;

   ret = nouveau_object_new(chan, 0xbeef0301, NOUVEAU_NOTIFIER_CLASS,
                            &(struct nv04_notify){ .length = 32 },
                            sizeof(struct nv04_notify), &screen->sync);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate notifier: %d\n", ret);
      goto fail;
   }

   ret = nouveau_object_new(chan, 0xbeef5039, NV50_M2MF_CLASS,
                            NULL, 0, &screen->m2mf);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate PGRAPH context for M2MF: %d\n", ret);
      goto fail;
   }

   ret = nouveau_object_new(chan, 0xbeef502d, NV50_2D_CLASS,
                            NULL, 0, &screen->eng2d);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate PGRAPH context for 2D: %d\n", ret);
      goto fail;
   }

   switch (dev->chipset & 0xf0) {
   case 0x50:
      tesla_class = NV50_3D_CLASS;
      break;
   case 0x80:
   case 0x90:
      tesla_class = NV84_3D_CLASS;
      break;
   case 0xa0:
      switch (dev->chipset) {
      case 0xa0:
      case 0xaa:
      case 0xac:
         tesla_class = NVA0_3D_CLASS;
         break;
      case 0xaf:
         tesla_class = NVAF_3D_CLASS;
         break;
      default:
         tesla_class = NVA3_3D_CLASS;
         break;
      }
      break;
   default:
      NOUVEAU_ERR("Not a known NV50 chipset: NV%02x\n", dev->chipset);
      goto fail;
   }
   screen->base.class_3d = tesla_class;

   ret = nouveau_object_new(chan, 0xbeef5097, tesla_class,
                            NULL, 0, &screen->tesla);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate PGRAPH context for 3D: %d\n", ret);
      goto fail;
   }

   /* This over-allocates by a page. The GP, which would execute at the end of
    * the last page, would trigger faults. The going theory is that it
    * prefetches up to a certain amount.
    */
   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 1 << 16,
                        (3 << NV50_CODE_BO_SIZE_LOG2) + 0x1000,
                        NULL, &screen->code);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate code bo: %d\n", ret);
      goto fail;
   }

   nouveau_heap_init(&screen->vp_code_heap, 0, 1 << NV50_CODE_BO_SIZE_LOG2);
   nouveau_heap_init(&screen->gp_code_heap, 0, 1 << NV50_CODE_BO_SIZE_LOG2);
   nouveau_heap_init(&screen->fp_code_heap, 0, 1 << NV50_CODE_BO_SIZE_LOG2);

   nouveau_getparam(dev, NOUVEAU_GETPARAM_GRAPH_UNITS, &value);

   screen->TPs = util_bitcount(value & 0xffff);
   screen->MPsInTP = util_bitcount((value >> 24) & 0xf);
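   /* GRAPH_UNITS is treated here as a pair of enable masks: the low 16 bits
    * describe the available TPs and bits 24-27 the MPs within a TP, so the
    * popcounts above give TPs and MPsInTP; mp_count below is their product.
    */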

   screen->mp_count = screen->TPs * screen->MPsInTP;

   stack_size = util_next_power_of_two(screen->TPs) * screen->MPsInTP *
         STACK_WARPS_ALLOC * 64 * 8;

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 1 << 16, stack_size, NULL,
                        &screen->stack_bo);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate stack bo: %d\n", ret);
      goto fail;
   }

   uint64_t size_of_one_temp = util_next_power_of_two(screen->TPs) *
         screen->MPsInTP * LOCAL_WARPS_ALLOC * THREADS_IN_WARP *
         ONE_TEMP_SIZE;
   screen->max_tls_space = dev->vram_size / size_of_one_temp * ONE_TEMP_SIZE;
   screen->max_tls_space /= 2; /* half of vram */

   /* hw can address max 64 KiB */
   screen->max_tls_space = MIN2(screen->max_tls_space, 64 << 10);

   uint64_t tls_size;
   unsigned tls_space = 4/*temps*/ * ONE_TEMP_SIZE;
   ret = nv50_tls_alloc(screen, tls_space, &tls_size);
   if (ret)
      goto fail;

   if (nouveau_mesa_debug)
      debug_printf("TPs = %u, MPsInTP = %u, VRAM = %"PRIu64" MiB, tls_size = %"PRIu64" KiB\n",
                   screen->TPs, screen->MPsInTP, dev->vram_size >> 20, tls_size >> 10);

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 1 << 16, 4 << 16, NULL,
                        &screen->uniforms);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate uniforms bo: %d\n", ret);
      goto fail;
   }

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 1 << 16, 3 << 16, NULL,
                        &screen->txc);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate TIC/TSC bo: %d\n", ret);
      goto fail;
   }

   screen->tic.entries = CALLOC(4096, sizeof(void *));
   screen->tsc.entries = screen->tic.entries + 2048;

   if (!nv50_blitter_create(screen))
      goto fail;

   nv50_screen_init_hwctx(screen);

   ret = nv50_screen_compute_setup(screen, screen->base.pushbuf);
   if (ret) {
      NOUVEAU_ERR("Failed to init compute context: %d\n", ret);
      goto fail;
   }

   nouveau_fence_new(&screen->base, &screen->base.fence.current);

   return &screen->base;

fail:
   screen->base.base.context_create = NULL;
   return &screen->base;
}

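/* TIC/TSC slot allocators: a ring cursor advances past entries that are
 * currently locked (bound) and reuses the next free slot, invalidating the
 * id of whatever entry previously occupied it.
 */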
int
nv50_screen_tic_alloc(struct nv50_screen *screen, void *entry)
{
   int i = screen->tic.next;

   while (screen->tic.lock[i / 32] & (1 << (i % 32)))
      i = (i + 1) & (NV50_TIC_MAX_ENTRIES - 1);

   screen->tic.next = (i + 1) & (NV50_TIC_MAX_ENTRIES - 1);

   if (screen->tic.entries[i])
      nv50_tic_entry(screen->tic.entries[i])->id = -1;

   screen->tic.entries[i] = entry;
   return i;
}

int
nv50_screen_tsc_alloc(struct nv50_screen *screen, void *entry)
{
   int i = screen->tsc.next;

   while (screen->tsc.lock[i / 32] & (1 << (i % 32)))
      i = (i + 1) & (NV50_TSC_MAX_ENTRIES - 1);

   screen->tsc.next = (i + 1) & (NV50_TSC_MAX_ENTRIES - 1);

   if (screen->tsc.entries[i])
      nv50_tsc_entry(screen->tsc.entries[i])->id = -1;

   screen->tsc.entries[i] = entry;
   return i;
}