nouveau: fix undefined behaviour when testing sample_count
[mesa.git] src/gallium/drivers/nvc0/nvc0_program.c
/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "pipe/p_defines.h"

#include "nvc0_context.h"

#include "nv50/codegen/nv50_ir_driver.h"

/* If only they told us the actual semantic instead of just GENERIC ... */
static void
nvc0_mesa_varying_hack(struct nv50_ir_varying *var)
{
   unsigned c;

   if (var->sn != TGSI_SEMANTIC_GENERIC)
      return;

   if (var->si <= 7) /* gl_TexCoord */
      for (c = 0; c < 4; ++c)
         var->slot[c] = (0x300 + var->si * 0x10 + c * 0x4) / 4;
   else
   if (var->si == 9) /* gl_PointCoord */
      for (c = 0; c < 4; ++c)
         var->slot[c] = (0x2e0 + c * 0x4) / 4;
   else
   if (var->si <= 39)
      for (c = 0; c < 4; ++c) /* move down user varyings (first has index 8) */
         var->slot[c] -= 0x80 / 4;
   else {
      NOUVEAU_ERR("too many varyings / invalid location: %u !\n", var->si);
      for (c = 0; c < 4; ++c)
         var->slot[c] = (0x270 + c * 0x4) / 4; /* catch invalid indices */
   }
}

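/* The addresses returned by the helpers below are byte offsets into the
 * shader's input/output space; the slot assignment functions divide them
 * by 4 to obtain 32-bit slot indices.
 */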
static uint32_t
nvc0_shader_input_address(unsigned sn, unsigned si, unsigned ubase)
{
   switch (sn) {
   case NV50_SEMANTIC_TESSFACTOR:   return 0x000 + si * 0x4;
   case TGSI_SEMANTIC_PRIMID:       return 0x060;
   case TGSI_SEMANTIC_PSIZE:        return 0x06c;
   case TGSI_SEMANTIC_POSITION:     return 0x070;
   case TGSI_SEMANTIC_GENERIC:      return ubase + si * 0x10;
   case TGSI_SEMANTIC_FOG:          return 0x270;
   case TGSI_SEMANTIC_COLOR:        return 0x280 + si * 0x10;
   case TGSI_SEMANTIC_BCOLOR:       return 0x2a0 + si * 0x10;
   case NV50_SEMANTIC_CLIPDISTANCE: return 0x2c0 + si * 0x4;
   case TGSI_SEMANTIC_CLIPDIST:     return 0x2c0 + si * 0x10;
   case TGSI_SEMANTIC_CLIPVERTEX:   return 0x260;
   case NV50_SEMANTIC_POINTCOORD:   return 0x2e0;
   case NV50_SEMANTIC_TESSCOORD:    return 0x2f0;
   case TGSI_SEMANTIC_INSTANCEID:   return 0x2f8;
   case TGSI_SEMANTIC_VERTEXID:     return 0x2fc;
   case NV50_SEMANTIC_TEXCOORD:     return 0x300 + si * 0x10;
   case TGSI_SEMANTIC_FACE:         return 0x3fc;
   case NV50_SEMANTIC_INVOCATIONID: return ~0;
   default:
      assert(!"invalid TGSI input semantic");
      return ~0;
   }
}

static uint32_t
nvc0_shader_output_address(unsigned sn, unsigned si, unsigned ubase)
{
   switch (sn) {
   case NV50_SEMANTIC_TESSFACTOR:    return 0x000 + si * 0x4;
   case TGSI_SEMANTIC_PRIMID:        return 0x060;
   case NV50_SEMANTIC_LAYER:         return 0x064;
   case NV50_SEMANTIC_VIEWPORTINDEX: return 0x068;
   case TGSI_SEMANTIC_PSIZE:         return 0x06c;
   case TGSI_SEMANTIC_POSITION:      return 0x070;
   case TGSI_SEMANTIC_GENERIC:       return ubase + si * 0x10;
   case TGSI_SEMANTIC_FOG:           return 0x270;
   case TGSI_SEMANTIC_COLOR:         return 0x280 + si * 0x10;
   case TGSI_SEMANTIC_BCOLOR:        return 0x2a0 + si * 0x10;
   case NV50_SEMANTIC_CLIPDISTANCE:  return 0x2c0 + si * 0x4;
   case TGSI_SEMANTIC_CLIPDIST:      return 0x2c0 + si * 0x10;
   case TGSI_SEMANTIC_CLIPVERTEX:    return 0x260;
   case NV50_SEMANTIC_TEXCOORD:      return 0x300 + si * 0x10;
   case TGSI_SEMANTIC_EDGEFLAG:      return ~0;
   default:
      assert(!"invalid TGSI output semantic");
      return ~0;
   }
}

static int
nvc0_vp_assign_input_slots(struct nv50_ir_prog_info *info)
{
   unsigned i, c, n;

   for (n = 0, i = 0; i < info->numInputs; ++i) {
      switch (info->in[i].sn) {
      case TGSI_SEMANTIC_INSTANCEID: /* for SM4 only, in TGSI they're SVs */
      case TGSI_SEMANTIC_VERTEXID:
         info->in[i].mask = 0x1;
         info->in[i].slot[0] =
            nvc0_shader_input_address(info->in[i].sn, 0, 0) / 4;
         continue;
      default:
         break;
      }
      for (c = 0; c < 4; ++c)
         info->in[i].slot[c] = (0x80 + n * 0x10 + c * 0x4) / 4;
      ++n;
   }

   return 0;
}

static int
nvc0_sp_assign_input_slots(struct nv50_ir_prog_info *info)
{
   unsigned ubase = MAX2(0x80, 0x20 + info->numPatchConstants * 0x10);
   unsigned offset;
   unsigned i, c;

   for (i = 0; i < info->numInputs; ++i) {
      offset = nvc0_shader_input_address(info->in[i].sn,
                                         info->in[i].si, ubase);
      if (info->in[i].patch && offset >= 0x20)
         offset = 0x20 + info->in[i].si * 0x10;

      if (info->in[i].sn == NV50_SEMANTIC_TESSCOORD)
         info->in[i].mask &= 3;

      for (c = 0; c < 4; ++c)
         info->in[i].slot[c] = (offset + c * 0x4) / 4;

      nvc0_mesa_varying_hack(&info->in[i]);
   }

   return 0;
}

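/* Colour outputs occupy the first numColourResults * 4 scalar slots
 * (4 per render target); the sample mask and fragment depth outputs,
 * when present, are assigned slots after all colour results.
 */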
static int
nvc0_fp_assign_output_slots(struct nv50_ir_prog_info *info)
{
   unsigned count = info->prop.fp.numColourResults * 4;
   unsigned i, c;

   for (i = 0; i < info->numOutputs; ++i)
      if (info->out[i].sn == TGSI_SEMANTIC_COLOR)
         for (c = 0; c < 4; ++c)
            info->out[i].slot[c] = info->out[i].si * 4 + c;

   if (info->io.sampleMask < PIPE_MAX_SHADER_OUTPUTS)
      info->out[info->io.sampleMask].slot[0] = count++;
   else
   if (info->target >= 0xe0)
      count++; /* on Kepler, depth is always last colour reg + 2 */

   if (info->io.fragDepth < PIPE_MAX_SHADER_OUTPUTS)
      info->out[info->io.fragDepth].slot[2] = count;

   return 0;
}

static int
nvc0_sp_assign_output_slots(struct nv50_ir_prog_info *info)
{
   unsigned ubase = MAX2(0x80, 0x20 + info->numPatchConstants * 0x10);
   unsigned offset;
   unsigned i, c;

   for (i = 0; i < info->numOutputs; ++i) {
      offset = nvc0_shader_output_address(info->out[i].sn,
                                          info->out[i].si, ubase);
      if (info->out[i].patch && offset >= 0x20)
         offset = 0x20 + info->out[i].si * 0x10;

      for (c = 0; c < 4; ++c)
         info->out[i].slot[c] = (offset + c * 0x4) / 4;

      nvc0_mesa_varying_hack(&info->out[i]);
   }

   return 0;
}

static int
nvc0_program_assign_varying_slots(struct nv50_ir_prog_info *info)
{
   int ret;

   if (info->type == PIPE_SHADER_VERTEX)
      ret = nvc0_vp_assign_input_slots(info);
   else
      ret = nvc0_sp_assign_input_slots(info);
   if (ret)
      return ret;

   if (info->type == PIPE_SHADER_FRAGMENT)
      ret = nvc0_fp_assign_output_slots(info);
   else
      ret = nvc0_sp_assign_output_slots(info);
   return ret;
}

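/* hdr[4] holds the minimum (bits 12..19) and maximum (bits 24..31) output
 * slot read back by the program (the "parallel output read address" range);
 * the header generators initialize it to 0xff000, i.e. min = 0xff, max = 0.
 */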
static INLINE void
nvc0_vtgp_hdr_update_oread(struct nvc0_program *vp, uint8_t slot)
{
   uint8_t min = (vp->hdr[4] >> 12) & 0xff;
   uint8_t max = (vp->hdr[4] >> 24);

   min = MIN2(min, slot);
   max = MAX2(max, slot);

   vp->hdr[4] = (max << 24) | (min << 12);
}

/* Common part of header generation for VP, TCP, TEP and GP. */
static int
nvc0_vtgp_gen_header(struct nvc0_program *vp, struct nv50_ir_prog_info *info)
{
   unsigned i, c, a;

   for (i = 0; i < info->numInputs; ++i) {
      if (info->in[i].patch)
         continue;
      for (c = 0; c < 4; ++c) {
         a = info->in[i].slot[c];
         if (info->in[i].mask & (1 << c)) {
            if (info->in[i].sn != NV50_SEMANTIC_TESSCOORD)
               vp->hdr[5 + a / 32] |= 1 << (a % 32);
            else
               nvc0_vtgp_hdr_update_oread(vp, info->in[i].slot[c]);
         }
      }
   }

   for (i = 0; i < info->numOutputs; ++i) {
      if (info->out[i].patch)
         continue;
      for (c = 0; c < 4; ++c) {
         if (!(info->out[i].mask & (1 << c)))
            continue;
         assert(info->out[i].slot[c] >= 0x40 / 4);
         a = info->out[i].slot[c] - 0x40 / 4;
         vp->hdr[13 + a / 32] |= 1 << (a % 32);
         if (info->out[i].oread)
            nvc0_vtgp_hdr_update_oread(vp, info->out[i].slot[c]);
      }
   }

   for (i = 0; i < info->numSysVals; ++i) {
      switch (info->sv[i].sn) {
      case TGSI_SEMANTIC_PRIMID:
         vp->hdr[5] |= 1 << 24;
         break;
      case TGSI_SEMANTIC_INSTANCEID:
         vp->hdr[10] |= 1 << 30;
         break;
      case TGSI_SEMANTIC_VERTEXID:
         vp->hdr[10] |= 1 << 31;
         break;
      default:
         break;
      }
   }

   vp->vp.clip_enable = info->io.clipDistanceMask;
   for (i = 0; i < 8; ++i)
      if (info->io.cullDistanceMask & (1 << i))
         vp->vp.clip_mode |= 1 << (i * 4);

   if (info->io.genUserClip < 0)
      vp->vp.num_ucps = PIPE_MAX_CLIP_PLANES + 1; /* prevent rebuilding */

   return 0;
}

static int
nvc0_vp_gen_header(struct nvc0_program *vp, struct nv50_ir_prog_info *info)
{
   vp->hdr[0] = 0x20061 | (1 << 10);
   vp->hdr[4] = 0xff000;

   vp->hdr[18] = info->io.clipDistanceMask;

   return nvc0_vtgp_gen_header(vp, info);
}

#if defined(PIPE_SHADER_HULL) || defined(PIPE_SHADER_DOMAIN)
static void
nvc0_tp_get_tess_mode(struct nvc0_program *tp, struct nv50_ir_prog_info *info)
{
   if (info->prop.tp.outputPrim == PIPE_PRIM_MAX) {
      tp->tp.tess_mode = ~0;
      return;
   }
   switch (info->prop.tp.domain) {
   case PIPE_PRIM_LINES:
      tp->tp.tess_mode = NVC0_3D_TESS_MODE_PRIM_ISOLINES;
      break;
   case PIPE_PRIM_TRIANGLES:
      tp->tp.tess_mode = NVC0_3D_TESS_MODE_PRIM_TRIANGLES;
      if (info->prop.tp.winding > 0)
         tp->tp.tess_mode |= NVC0_3D_TESS_MODE_CW;
      break;
   case PIPE_PRIM_QUADS:
      tp->tp.tess_mode = NVC0_3D_TESS_MODE_PRIM_QUADS;
      break;
   default:
      tp->tp.tess_mode = ~0;
      return;
   }
   if (info->prop.tp.outputPrim != PIPE_PRIM_POINTS)
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_CONNECTED;

   switch (info->prop.tp.partitioning) {
   case PIPE_TESS_PART_INTEGER:
   case PIPE_TESS_PART_POW2:
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_SPACING_EQUAL;
      break;
   case PIPE_TESS_PART_FRACT_ODD:
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_SPACING_FRACTIONAL_ODD;
      break;
   case PIPE_TESS_PART_FRACT_EVEN:
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_SPACING_FRACTIONAL_EVEN;
      break;
   default:
      assert(!"invalid tessellator partitioning");
      break;
   }
}
#endif

#ifdef PIPE_SHADER_HULL
static int
nvc0_tcp_gen_header(struct nvc0_program *tcp, struct nv50_ir_prog_info *info)
{
   unsigned opcs = 6; /* output patch constants (at least the TessFactors) */

   tcp->tp.input_patch_size = info->prop.tp.inputPatchSize;

   if (info->numPatchConstants)
      opcs = 8 + info->numPatchConstants * 4;

   tcp->hdr[0] = 0x20061 | (2 << 10);

   tcp->hdr[1] = opcs << 24;
   tcp->hdr[2] = info->prop.tp.outputPatchSize << 24;

   tcp->hdr[4] = 0xff000; /* initial min/max parallel output read address */

   nvc0_vtgp_gen_header(tcp, info);

   nvc0_tp_get_tess_mode(tcp, info);

   return 0;
}
#endif

#ifdef PIPE_SHADER_DOMAIN
static int
nvc0_tep_gen_header(struct nvc0_program *tep, struct nv50_ir_prog_info *info)
{
   tep->tp.input_patch_size = ~0;

   tep->hdr[0] = 0x20061 | (3 << 10);
   tep->hdr[4] = 0xff000;

   nvc0_vtgp_gen_header(tep, info);

   nvc0_tp_get_tess_mode(tep, info);

   tep->hdr[18] |= 0x3 << 12; /* ? */

   return 0;
}
#endif

static int
nvc0_gp_gen_header(struct nvc0_program *gp, struct nv50_ir_prog_info *info)
{
   gp->hdr[0] = 0x20061 | (4 << 10);

   gp->hdr[2] = MIN2(info->prop.gp.instanceCount, 32) << 24;

   switch (info->prop.gp.outputPrim) {
   case PIPE_PRIM_POINTS:
      gp->hdr[3] = 0x01000000;
      gp->hdr[0] |= 0xf0000000;
      break;
   case PIPE_PRIM_LINE_STRIP:
      gp->hdr[3] = 0x06000000;
      gp->hdr[0] |= 0x10000000;
      break;
   case PIPE_PRIM_TRIANGLE_STRIP:
      gp->hdr[3] = 0x07000000;
      gp->hdr[0] |= 0x10000000;
      break;
   default:
      assert(0);
      break;
   }

   gp->hdr[4] = info->prop.gp.maxVertices & 0x1ff;

   return nvc0_vtgp_gen_header(gp, info);
}

#define NVC0_INTERP_FLAT          (1 << 0)
#define NVC0_INTERP_PERSPECTIVE   (2 << 0)
#define NVC0_INTERP_LINEAR        (3 << 0)
#define NVC0_INTERP_CENTROID      (1 << 2)

static uint8_t
nvc0_hdr_interp_mode(const struct nv50_ir_varying *var)
{
   if (var->linear)
      return NVC0_INTERP_LINEAR;
   if (var->flat)
      return NVC0_INTERP_FLAT;
   return NVC0_INTERP_PERSPECTIVE;
}

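/* Interpolation modes for generic FP inputs are packed as 2-bit fields into
 * the header words starting at hdr[4] (hence a *= 2 below); inputs in the
 * 0x060-0x07c and 0x2c0-0x2fc slot ranges (position, primitive id, clip
 * distances, ...) instead use dedicated enable bits in hdr[5] and hdr[14].
 */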
static int
nvc0_fp_gen_header(struct nvc0_program *fp, struct nv50_ir_prog_info *info)
{
   unsigned i, c, a, m;

   /* just 00062 on Kepler */
   fp->hdr[0] = 0x20062 | (5 << 10);
   fp->hdr[5] = 0x80000000; /* getting a trap if FRAG_COORD_UMASK.w = 0 */

   if (info->prop.fp.usesDiscard)
      fp->hdr[0] |= 0x8000;
   if (info->prop.fp.numColourResults > 1)
      fp->hdr[0] |= 0x4000;
   if (info->io.sampleMask < PIPE_MAX_SHADER_OUTPUTS)
      fp->hdr[19] |= 0x1;
   if (info->prop.fp.writesDepth) {
      fp->hdr[19] |= 0x2;
      fp->flags[0] = 0x11; /* deactivate ZCULL */
   }

   for (i = 0; i < info->numInputs; ++i) {
      m = nvc0_hdr_interp_mode(&info->in[i]);
      for (c = 0; c < 4; ++c) {
         if (!(info->in[i].mask & (1 << c)))
            continue;
         a = info->in[i].slot[c];
         if (info->in[i].slot[0] >= (0x060 / 4) &&
             info->in[i].slot[0] <= (0x07c / 4)) {
            fp->hdr[5] |= 1 << (24 + (a - 0x060 / 4));
         } else
         if (info->in[i].slot[0] >= (0x2c0 / 4) &&
             info->in[i].slot[0] <= (0x2fc / 4)) {
            fp->hdr[14] |= (1 << (a - 0x280 / 4)) & 0x03ff0000;
         } else {
            if (info->in[i].slot[c] < (0x040 / 4) ||
                info->in[i].slot[c] > (0x380 / 4))
               continue;
            a *= 2;
            if (info->in[i].slot[0] >= (0x300 / 4))
               a -= 32;
            fp->hdr[4 + a / 32] |= m << (a % 32);
         }
      }
   }

   for (i = 0; i < info->numOutputs; ++i) {
      if (info->out[i].sn == TGSI_SEMANTIC_COLOR)
         fp->hdr[18] |= info->out[i].mask << info->out[i].slot[0];
   }

   fp->fp.early_z = info->prop.fp.earlyFragTests;

   return 0;
}

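/* Build the per-buffer stride and varying-index tables used to program
 * transform feedback; an index of 0xff marks a component to be skipped.
 */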
static struct nvc0_transform_feedback_state *
nvc0_program_create_tfb_state(const struct nv50_ir_prog_info *info,
                              const struct pipe_stream_output_info *pso)
{
   struct nvc0_transform_feedback_state *tfb;
   unsigned b, i, c;

   tfb = MALLOC_STRUCT(nvc0_transform_feedback_state);
   if (!tfb)
      return NULL;
   for (b = 0; b < 4; ++b) {
      tfb->stride[b] = pso->stride[b] * 4;
      tfb->varying_count[b] = 0;
   }
   memset(tfb->varying_index, 0xff, sizeof(tfb->varying_index)); /* = skip */

   for (i = 0; i < pso->num_outputs; ++i) {
      unsigned s = pso->output[i].start_component;
      unsigned p = pso->output[i].dst_offset;
      b = pso->output[i].output_buffer;

      for (c = 0; c < pso->output[i].num_components; ++c)
         tfb->varying_index[b][p++] =
            info->out[pso->output[i].register_index].slot[s + c];

      tfb->varying_count[b] = MAX2(tfb->varying_count[b], p);
   }
   for (b = 0; b < 4; ++b) // zero unused indices (looks nicer)
      for (c = tfb->varying_count[b]; c & 3; ++c)
         tfb->varying_index[b][c] = 0;

   return tfb;
}

#ifdef DEBUG
static void
nvc0_program_dump(struct nvc0_program *prog)
{
   unsigned pos;

   for (pos = 0; pos < sizeof(prog->hdr) / sizeof(prog->hdr[0]); ++pos)
      debug_printf("HDR[%02lx] = 0x%08x\n",
                   pos * sizeof(prog->hdr[0]), prog->hdr[pos]);

   debug_printf("shader binary code (0x%x bytes):", prog->code_size);
   for (pos = 0; pos < prog->code_size / 4; ++pos) {
      if ((pos % 8) == 0)
         debug_printf("\n");
      debug_printf("%08x ", prog->code[pos]);
   }
   debug_printf("\n");
}
#endif

boolean
nvc0_program_translate(struct nvc0_program *prog, uint16_t chipset)
{
   struct nv50_ir_prog_info *info;
   int ret;

   info = CALLOC_STRUCT(nv50_ir_prog_info);
   if (!info)
      return FALSE;

   info->type = prog->type;
   info->target = chipset;
   info->bin.sourceRep = NV50_PROGRAM_IR_TGSI;
   info->bin.source = (void *)prog->pipe.tokens;

   info->io.genUserClip = prog->vp.num_ucps;
   info->io.ucpBase = 256;
   info->io.ucpBinding = 15;

   info->assignSlots = nvc0_program_assign_varying_slots;

#ifdef DEBUG
   info->optLevel = debug_get_num_option("NV50_PROG_OPTIMIZE", 3);
   info->dbgFlags = debug_get_num_option("NV50_PROG_DEBUG", 0);
#else
   info->optLevel = 3;
#endif

   ret = nv50_ir_generate_code(info);
   if (ret) {
      NOUVEAU_ERR("shader translation failed: %i\n", ret);
      goto out;
   }
   FREE(info->bin.syms);

   prog->code = info->bin.code;
   prog->code_size = info->bin.codeSize;
   prog->immd_data = info->immd.buf;
   prog->immd_size = info->immd.bufSize;
   prog->relocs = info->bin.relocData;
   prog->max_gpr = MAX2(4, (info->bin.maxGPR + 1));

   prog->vp.need_vertex_id = info->io.vertexId < PIPE_MAX_SHADER_INPUTS;

   if (info->io.edgeFlagOut < PIPE_MAX_ATTRIBS)
      info->out[info->io.edgeFlagOut].mask = 0; /* for headergen */
   prog->vp.edgeflag = info->io.edgeFlagIn;

   switch (prog->type) {
   case PIPE_SHADER_VERTEX:
      ret = nvc0_vp_gen_header(prog, info);
      break;
#ifdef PIPE_SHADER_HULL
   case PIPE_SHADER_HULL:
      ret = nvc0_tcp_gen_header(prog, info);
      break;
#endif
#ifdef PIPE_SHADER_DOMAIN
   case PIPE_SHADER_DOMAIN:
      ret = nvc0_tep_gen_header(prog, info);
      break;
#endif
   case PIPE_SHADER_GEOMETRY:
      ret = nvc0_gp_gen_header(prog, info);
      break;
   case PIPE_SHADER_FRAGMENT:
      ret = nvc0_fp_gen_header(prog, info);
      break;
   default:
      ret = -1;
      NOUVEAU_ERR("unknown program type: %u\n", prog->type);
      break;
   }
   if (ret)
      goto out;

   if (info->bin.tlsSpace) {
      assert(info->bin.tlsSpace < (1 << 24));
      prog->hdr[0] |= 1 << 26;
      prog->hdr[1] |= info->bin.tlsSpace; /* l[] size */
      prog->need_tls = TRUE;
   }
   /* TODO: factor 2 only needed where joinat/precont is used,
    * and we only have to count non-uniform branches
    */
   /*
   if ((info->maxCFDepth * 2) > 16) {
      prog->hdr[2] |= (((info->maxCFDepth * 2) + 47) / 48) * 0x200;
      prog->need_tls = TRUE;
   }
   */
   if (info->io.globalAccess)
      prog->hdr[0] |= 1 << 16;

   if (prog->pipe.stream_output.num_outputs)
      prog->tfb = nvc0_program_create_tfb_state(info,
                                                &prog->pipe.stream_output);

out:
   FREE(info);
   return !ret;
}

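/* Allocate space for the program in the screen's code segment heap and copy
 * the shader header, instructions and immediate data into it; if allocation
 * fails, all resident programs are evicted and the allocation is retried.
 */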
boolean
nvc0_program_upload_code(struct nvc0_context *nvc0, struct nvc0_program *prog)
{
   struct nvc0_screen *screen = nvc0->screen;
   int ret;
   uint32_t size = prog->code_size + NVC0_SHADER_HEADER_SIZE;
   uint32_t lib_pos = screen->lib_code->start;
   uint32_t code_pos;

   /* c[] bindings need to be aligned to 0x100, but we could use relocations
    * to save space. */
   if (prog->immd_size) {
      prog->immd_base = size;
      size = align(size, 0x40);
      size += prog->immd_size + 0xc0; /* add 0xc0 for align 0x40 -> 0x100 */
   }
   /* On Fermi, SP_START_ID must be aligned to 0x40.
    * On Kepler, the first instruction must be aligned to 0x80 because
    * latency information is expected only at certain positions.
    */
   if (screen->base.class_3d >= NVE4_3D_CLASS)
      size = size + 0x70;
   size = align(size, 0x40);

   ret = nouveau_heap_alloc(screen->text_heap, size, prog, &prog->mem);
   if (ret) {
      struct nouveau_heap *heap = screen->text_heap;
      struct nouveau_heap *iter;
      for (iter = heap; iter && iter->next != heap; iter = iter->next) {
         struct nvc0_program *evict = iter->priv;
         if (evict)
            nouveau_heap_free(&evict->mem);
      }
      debug_printf("WARNING: out of code space, evicting all shaders.\n");
      ret = nouveau_heap_alloc(heap, size, prog, &prog->mem);
      if (ret) {
         NOUVEAU_ERR("shader too large (0x%x) to fit in code space ?\n", size);
         return FALSE;
      }
      IMMED_NVC0(nvc0->base.pushbuf, NVC0_3D(SERIALIZE), 0);
   }
   prog->code_base = prog->mem->start;
   prog->immd_base = align(prog->mem->start + prog->immd_base, 0x100);
   assert((prog->immd_size == 0) || (prog->immd_base + prog->immd_size <=
                                     prog->mem->start + prog->mem->size));

   if (screen->base.class_3d >= NVE4_3D_CLASS) {
      switch (prog->mem->start & 0xff) {
      case 0x40: prog->code_base += 0x70; break;
      case 0x80: prog->code_base += 0x30; break;
      case 0xc0: prog->code_base += 0x70; break;
      default:
         prog->code_base += 0x30;
         assert((prog->mem->start & 0xff) == 0x00);
         break;
      }
   }
   code_pos = prog->code_base + NVC0_SHADER_HEADER_SIZE;

   if (prog->relocs)
      nv50_ir_relocate_code(prog->relocs, prog->code, code_pos, lib_pos, 0);

#ifdef DEBUG
   if (debug_get_bool_option("NV50_PROG_DEBUG", FALSE))
      nvc0_program_dump(prog);
#endif

   nvc0->base.push_data(&nvc0->base, screen->text, prog->code_base,
                        NOUVEAU_BO_VRAM, NVC0_SHADER_HEADER_SIZE, prog->hdr);
   nvc0->base.push_data(&nvc0->base, screen->text,
                        prog->code_base + NVC0_SHADER_HEADER_SIZE,
                        NOUVEAU_BO_VRAM, prog->code_size, prog->code);
   if (prog->immd_size)
      nvc0->base.push_data(&nvc0->base,
                           screen->text, prog->immd_base, NOUVEAU_BO_VRAM,
                           prog->immd_size, prog->immd_data);

   BEGIN_NVC0(nvc0->base.pushbuf, NVC0_3D(MEM_BARRIER), 1);
   PUSH_DATA (nvc0->base.pushbuf, 0x1011);

   return TRUE;
}

/* Upload code for builtin functions like integer division emulation. */
void
nvc0_program_library_upload(struct nvc0_context *nvc0)
{
   struct nvc0_screen *screen = nvc0->screen;
   int ret;
   uint32_t size;
   const uint32_t *code;

   if (screen->lib_code)
      return;

   nv50_ir_get_target_library(screen->base.device->chipset, &code, &size);
   if (!size)
      return;

   ret = nouveau_heap_alloc(screen->text_heap, align(size, 0x100), NULL,
                            &screen->lib_code);
   if (ret)
      return;

   nvc0->base.push_data(&nvc0->base,
                        screen->text, screen->lib_code->start, NOUVEAU_BO_VRAM,
                        size, code);
   /* no need for a memory barrier, will be emitted with first program */
}

void
nvc0_program_destroy(struct nvc0_context *nvc0, struct nvc0_program *prog)
{
   const struct pipe_shader_state pipe = prog->pipe;
   const ubyte type = prog->type;

   if (prog->mem)
      nouveau_heap_free(&prog->mem);

   FREE(prog->code);
   FREE(prog->immd_data);
   FREE(prog->relocs);
   if (prog->tfb) {
      if (nvc0->state.tfb == prog->tfb)
         nvc0->state.tfb = NULL;
      FREE(prog->tfb);
   }

   memset(prog, 0, sizeof(*prog));

   prog->pipe = pipe;
   prog->type = type;
}