/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pipe/p_defines.h"

#include "tgsi/tgsi_ureg.h"

#include "nvc0/nvc0_context.h"

#include "codegen/nv50_ir_driver.h"
#include "nvc0/nve4_compute.h"

/* NOTE: Using a[0x270] in FP may cause an error even if we're using fewer
 * than 124 scalar varying values.
 */
static uint32_t
nvc0_shader_input_address(unsigned sn, unsigned si)
{
   switch (sn) {
   case TGSI_SEMANTIC_TESSOUTER:    return 0x000 + si * 0x4;
   case TGSI_SEMANTIC_TESSINNER:    return 0x010 + si * 0x4;
   case TGSI_SEMANTIC_PATCH:        return 0x020 + si * 0x10;
   case TGSI_SEMANTIC_PRIMID:       return 0x060;
   case TGSI_SEMANTIC_LAYER:        return 0x064;
   case TGSI_SEMANTIC_VIEWPORT_INDEX: return 0x068;
   case TGSI_SEMANTIC_PSIZE:        return 0x06c;
   case TGSI_SEMANTIC_POSITION:     return 0x070;
   case TGSI_SEMANTIC_GENERIC:      return 0x080 + si * 0x10;
   case TGSI_SEMANTIC_FOG:          return 0x2e8;
   case TGSI_SEMANTIC_COLOR:        return 0x280 + si * 0x10;
   case TGSI_SEMANTIC_BCOLOR:       return 0x2a0 + si * 0x10;
   case TGSI_SEMANTIC_CLIPDIST:     return 0x2c0 + si * 0x10;
   case TGSI_SEMANTIC_CLIPVERTEX:   return 0x270;
   case TGSI_SEMANTIC_PCOORD:       return 0x2e0;
   case TGSI_SEMANTIC_TESSCOORD:    return 0x2f0;
   case TGSI_SEMANTIC_INSTANCEID:   return 0x2f8;
   case TGSI_SEMANTIC_VERTEXID:     return 0x2fc;
   case TGSI_SEMANTIC_TEXCOORD:     return 0x300 + si * 0x10;
   default:
      assert(!"invalid TGSI input semantic");
      return ~0;
   }
}

static uint32_t
nvc0_shader_output_address(unsigned sn, unsigned si)
{
   switch (sn) {
   case TGSI_SEMANTIC_TESSOUTER:    return 0x000 + si * 0x4;
   case TGSI_SEMANTIC_TESSINNER:    return 0x010 + si * 0x4;
   case TGSI_SEMANTIC_PATCH:        return 0x020 + si * 0x10;
   case TGSI_SEMANTIC_PRIMID:       return 0x060;
   case TGSI_SEMANTIC_LAYER:        return 0x064;
   case TGSI_SEMANTIC_VIEWPORT_INDEX: return 0x068;
   case TGSI_SEMANTIC_PSIZE:        return 0x06c;
   case TGSI_SEMANTIC_POSITION:     return 0x070;
   case TGSI_SEMANTIC_GENERIC:      return 0x080 + si * 0x10;
   case TGSI_SEMANTIC_FOG:          return 0x2e8;
   case TGSI_SEMANTIC_COLOR:        return 0x280 + si * 0x10;
   case TGSI_SEMANTIC_BCOLOR:       return 0x2a0 + si * 0x10;
   case TGSI_SEMANTIC_CLIPDIST:     return 0x2c0 + si * 0x10;
   case TGSI_SEMANTIC_CLIPVERTEX:   return 0x270;
   case TGSI_SEMANTIC_TEXCOORD:     return 0x300 + si * 0x10;
   /* case TGSI_SEMANTIC_VIEWPORT_MASK: return 0x3a0; */
   case TGSI_SEMANTIC_EDGEFLAG:     return ~0;
   default:
      assert(!"invalid TGSI output semantic");
      return ~0;
   }
}

static int
nvc0_vp_assign_input_slots(struct nv50_ir_prog_info *info)
{
   unsigned i, c, n;

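   /* Regular vertex attributes are packed consecutively, one 16-byte (vec4)
    * slot each, starting at a[0x80]; InstanceID/VertexID keep the fixed
    * addresses from the table above. Slot values are in 32-bit words, hence
    * the division by 4.
    */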
   for (n = 0, i = 0; i < info->numInputs; ++i) {
      switch (info->in[i].sn) {
      case TGSI_SEMANTIC_INSTANCEID: /* for SM4 only, in TGSI they're SVs */
      case TGSI_SEMANTIC_VERTEXID:
         info->in[i].mask = 0x1;
         info->in[i].slot[0] =
            nvc0_shader_input_address(info->in[i].sn, 0) / 4;
         continue;
      default:
         break;
      }
      for (c = 0; c < 4; ++c)
         info->in[i].slot[c] = (0x80 + n * 0x10 + c * 0x4) / 4;
      ++n;
   }

   return 0;
}

static int
nvc0_sp_assign_input_slots(struct nv50_ir_prog_info *info)
{
   unsigned offset;
   unsigned i, c;

   for (i = 0; i < info->numInputs; ++i) {
      offset = nvc0_shader_input_address(info->in[i].sn, info->in[i].si);

      for (c = 0; c < 4; ++c)
         info->in[i].slot[c] = (offset + c * 0x4) / 4;
   }

   return 0;
}

static int
nvc0_fp_assign_output_slots(struct nv50_ir_prog_info *info)
{
   unsigned count = info->prop.fp.numColourResults * 4;
   unsigned i, c;

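   /* Each colour result takes 4 consecutive output registers; the sample
    * mask (if written) goes right after the last colour, and fragment depth
    * comes last, in the .z component slot.
    */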
   for (i = 0; i < info->numOutputs; ++i)
      if (info->out[i].sn == TGSI_SEMANTIC_COLOR)
         for (c = 0; c < 4; ++c)
            info->out[i].slot[c] = info->out[i].si * 4 + c;

   if (info->io.sampleMask < PIPE_MAX_SHADER_OUTPUTS)
      info->out[info->io.sampleMask].slot[0] = count++;
   else
   if (info->target >= 0xe0)
      count++; /* on Kepler, depth is always last colour reg + 2 */

   if (info->io.fragDepth < PIPE_MAX_SHADER_OUTPUTS)
      info->out[info->io.fragDepth].slot[2] = count;

   return 0;
}

static int
nvc0_sp_assign_output_slots(struct nv50_ir_prog_info *info)
{
   unsigned offset;
   unsigned i, c;

   for (i = 0; i < info->numOutputs; ++i) {
      offset = nvc0_shader_output_address(info->out[i].sn, info->out[i].si);

      for (c = 0; c < 4; ++c)
         info->out[i].slot[c] = (offset + c * 0x4) / 4;
   }

   return 0;
}

static int
nvc0_program_assign_varying_slots(struct nv50_ir_prog_info *info)
{
   int ret;

   if (info->type == PIPE_SHADER_VERTEX)
      ret = nvc0_vp_assign_input_slots(info);
   else
      ret = nvc0_sp_assign_input_slots(info);
   if (ret)
      return ret;

   if (info->type == PIPE_SHADER_FRAGMENT)
      ret = nvc0_fp_assign_output_slots(info);
   else
      ret = nvc0_sp_assign_output_slots(info);
   return ret;
}

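/* hdr[4] packs the range of output slots read back by the shader (OREAD):
 * bits 12..19 hold the minimum slot, bits 24..31 the maximum. Callers
 * initialize it to 0xff000 (min = 0xff, max = 0) so any read widens the
 * range correctly.
 */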
static inline void
nvc0_vtgp_hdr_update_oread(struct nvc0_program *vp, uint8_t slot)
{
   uint8_t min = (vp->hdr[4] >> 12) & 0xff;
   uint8_t max = (vp->hdr[4] >> 24);

   min = MIN2(min, slot);
   max = MAX2(max, slot);

   vp->hdr[4] = (max << 24) | (min << 12);
}

/* Common part of header generation for VP, TCP, TEP and GP. */
static int
nvc0_vtgp_gen_header(struct nvc0_program *vp, struct nv50_ir_prog_info *info)
{
   unsigned i, c, a;

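   /* hdr[5..] is a bitmask of the input attribute words the shader reads
    * (one bit per 32-bit word of attribute space); hdr[13..] is the same for
    * outputs, biased so that attribute address 0x40 maps to bit 0. Per-patch
    * varyings are skipped here.
    */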
   for (i = 0; i < info->numInputs; ++i) {
      if (info->in[i].patch)
         continue;
      for (c = 0; c < 4; ++c) {
         a = info->in[i].slot[c];
         if (info->in[i].mask & (1 << c))
            vp->hdr[5 + a / 32] |= 1 << (a % 32);
      }
   }

   for (i = 0; i < info->numOutputs; ++i) {
      if (info->out[i].patch)
         continue;
      for (c = 0; c < 4; ++c) {
         if (!(info->out[i].mask & (1 << c)))
            continue;
         assert(info->out[i].slot[c] >= 0x40 / 4);
         a = info->out[i].slot[c] - 0x40 / 4;
         vp->hdr[13 + a / 32] |= 1 << (a % 32);
         if (info->out[i].oread)
            nvc0_vtgp_hdr_update_oread(vp, info->out[i].slot[c]);
      }
   }

   for (i = 0; i < info->numSysVals; ++i) {
      switch (info->sv[i].sn) {
      case TGSI_SEMANTIC_PRIMID:
         vp->hdr[5] |= 1 << 24;
         break;
      case TGSI_SEMANTIC_INSTANCEID:
         vp->hdr[10] |= 1 << 30;
         break;
      case TGSI_SEMANTIC_VERTEXID:
         vp->hdr[10] |= 1 << 31;
         break;
      case TGSI_SEMANTIC_TESSCOORD:
         /* We don't have the mask or the slots populated here. While that
          * could be fixed up, the vast majority of the time, if either of
          * the coords is read, both will be read.
          */
         nvc0_vtgp_hdr_update_oread(vp, 0x2f0 / 4);
         nvc0_vtgp_hdr_update_oread(vp, 0x2f4 / 4);
         break;
      default:
         break;
      }
   }

   vp->vp.clip_enable = (1 << info->io.clipDistances) - 1;
   vp->vp.cull_enable =
      ((1 << info->io.cullDistances) - 1) << info->io.clipDistances;
   for (i = 0; i < info->io.cullDistances; ++i)
      vp->vp.clip_mode |= 1 << ((info->io.clipDistances + i) * 4);

   if (info->io.genUserClip < 0)
      vp->vp.num_ucps = PIPE_MAX_CLIP_PLANES + 1; /* prevent rebuilding */

   return 0;
}

static int
nvc0_vp_gen_header(struct nvc0_program *vp, struct nv50_ir_prog_info *info)
{
   vp->hdr[0] = 0x20061 | (1 << 10);
   vp->hdr[4] = 0xff000;

   return nvc0_vtgp_gen_header(vp, info);
}

static void
nvc0_tp_get_tess_mode(struct nvc0_program *tp, struct nv50_ir_prog_info *info)
{
   if (info->prop.tp.outputPrim == PIPE_PRIM_MAX) {
      tp->tp.tess_mode = ~0;
      return;
   }
   switch (info->prop.tp.domain) {
   case PIPE_PRIM_LINES:
      tp->tp.tess_mode = NVC0_3D_TESS_MODE_PRIM_ISOLINES;
      break;
   case PIPE_PRIM_TRIANGLES:
      tp->tp.tess_mode = NVC0_3D_TESS_MODE_PRIM_TRIANGLES;
      break;
   case PIPE_PRIM_QUADS:
      tp->tp.tess_mode = NVC0_3D_TESS_MODE_PRIM_QUADS;
      break;
   default:
      tp->tp.tess_mode = ~0;
      return;
   }

   /* It seems like lines want the "CW" bit to indicate they're connected, and
    * spit out errors in dmesg when the "CONNECTED" bit is set.
    */
   if (info->prop.tp.outputPrim != PIPE_PRIM_POINTS) {
      if (info->prop.tp.domain == PIPE_PRIM_LINES)
         tp->tp.tess_mode |= NVC0_3D_TESS_MODE_CW;
      else
         tp->tp.tess_mode |= NVC0_3D_TESS_MODE_CONNECTED;
   }

   /* Winding only matters for triangles/quads, not lines. */
   if (info->prop.tp.domain != PIPE_PRIM_LINES &&
       info->prop.tp.outputPrim != PIPE_PRIM_POINTS &&
       info->prop.tp.winding > 0)
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_CW;

   switch (info->prop.tp.partitioning) {
   case PIPE_TESS_SPACING_EQUAL:
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_SPACING_EQUAL;
      break;
   case PIPE_TESS_SPACING_FRACTIONAL_ODD:
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_SPACING_FRACTIONAL_ODD;
      break;
   case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
      tp->tp.tess_mode |= NVC0_3D_TESS_MODE_SPACING_FRACTIONAL_EVEN;
      break;
   default:
      assert(!"invalid tessellator partitioning");
      break;
   }
}

static int
nvc0_tcp_gen_header(struct nvc0_program *tcp, struct nv50_ir_prog_info *info)
{
   unsigned opcs = 6; /* output patch constants (at least the TessFactors) */

   tcp->tp.input_patch_size = info->prop.tp.inputPatchSize;

   if (info->numPatchConstants)
      opcs = 8 + info->numPatchConstants * 4;

   tcp->hdr[0] = 0x20061 | (2 << 10);

   tcp->hdr[1] = opcs << 24;
   tcp->hdr[2] = info->prop.tp.outputPatchSize << 24;

   tcp->hdr[4] = 0xff000; /* initial min/max parallel output read address */

   nvc0_vtgp_gen_header(tcp, info);

   if (info->target >= NVISA_GM107_CHIPSET) {
      /* On GM107+, the number of output patch components has moved in the TCP
       * header, but it seems like the blob still also uses the old position.
       * Also, the high 8 bits are located in between the min/max parallel
       * fields and have to be set after updating the outputs. */
      tcp->hdr[3] = (opcs & 0x0f) << 28;
      tcp->hdr[4] |= (opcs & 0xf0) << 16;
   }

   nvc0_tp_get_tess_mode(tcp, info);

   return 0;
}

static int
nvc0_tep_gen_header(struct nvc0_program *tep, struct nv50_ir_prog_info *info)
{
   tep->tp.input_patch_size = ~0;

   tep->hdr[0] = 0x20061 | (3 << 10);
   tep->hdr[4] = 0xff000;

   nvc0_vtgp_gen_header(tep, info);

   nvc0_tp_get_tess_mode(tep, info);

   tep->hdr[18] |= 0x3 << 12; /* ? */

   return 0;
}

static int
nvc0_gp_gen_header(struct nvc0_program *gp, struct nv50_ir_prog_info *info)
{
   gp->hdr[0] = 0x20061 | (4 << 10);

   gp->hdr[2] = MIN2(info->prop.gp.instanceCount, 32) << 24;

   switch (info->prop.gp.outputPrim) {
   case PIPE_PRIM_POINTS:
      gp->hdr[3] = 0x01000000;
      gp->hdr[0] |= 0xf0000000;
      break;
   case PIPE_PRIM_LINE_STRIP:
      gp->hdr[3] = 0x06000000;
      gp->hdr[0] |= 0x10000000;
      break;
   case PIPE_PRIM_TRIANGLE_STRIP:
      gp->hdr[3] = 0x07000000;
      gp->hdr[0] |= 0x10000000;
      break;
   default:
      assert(0);
      break;
   }

   gp->hdr[4] = CLAMP(info->prop.gp.maxVertices, 1, 1024);

   return nvc0_vtgp_gen_header(gp, info);
}

#define NVC0_INTERP_FLAT          (1 << 0)
#define NVC0_INTERP_PERSPECTIVE   (2 << 0)
#define NVC0_INTERP_LINEAR        (3 << 0)
#define NVC0_INTERP_CENTROID      (1 << 2)

static uint8_t
nvc0_hdr_interp_mode(const struct nv50_ir_varying *var)
{
   if (var->linear)
      return NVC0_INTERP_LINEAR;
   if (var->flat)
      return NVC0_INTERP_FLAT;
   return NVC0_INTERP_PERSPECTIVE;
}

static int
nvc0_fp_gen_header(struct nvc0_program *fp, struct nv50_ir_prog_info *info)
{
   unsigned i, c, a, m;

   /* just 00062 on Kepler */
   fp->hdr[0] = 0x20062 | (5 << 10);
   fp->hdr[5] = 0x80000000; /* getting a trap if FRAG_COORD_UMASK.w = 0 */

   if (info->prop.fp.usesDiscard)
      fp->hdr[0] |= 0x8000;
   if (info->prop.fp.numColourResults > 1)
      fp->hdr[0] |= 0x4000;
   if (info->io.sampleMask < PIPE_MAX_SHADER_OUTPUTS)
      fp->hdr[19] |= 0x1;
   if (info->prop.fp.writesDepth) {
      fp->hdr[19] |= 0x2;
      fp->flags[0] = 0x11; /* deactivate ZCULL */
   }

   for (i = 0; i < info->numInputs; ++i) {
      m = nvc0_hdr_interp_mode(&info->in[i]);
      if (info->in[i].sn == TGSI_SEMANTIC_COLOR) {
         fp->fp.colors |= 1 << info->in[i].si;
         if (info->in[i].sc)
            fp->fp.color_interp[info->in[i].si] = m | (info->in[i].mask << 4);
      }
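      /* Inputs at 0x060..0x07c (PrimID, Layer, ViewportIndex, PointSize,
       * Position) are enabled via bits 24..31 of hdr[5]; those at
       * 0x2c0..0x2fc (clip distances etc.) go into hdr[14]. Everything else
       * gets a 2-bit interpolation mode per 32-bit attribute word, packed
       * into the table indexed from hdr[4].
       */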
      for (c = 0; c < 4; ++c) {
         if (!(info->in[i].mask & (1 << c)))
            continue;
         a = info->in[i].slot[c];
         if (info->in[i].slot[0] >= (0x060 / 4) &&
             info->in[i].slot[0] <= (0x07c / 4)) {
            fp->hdr[5] |= 1 << (24 + (a - 0x060 / 4));
         } else
         if (info->in[i].slot[0] >= (0x2c0 / 4) &&
             info->in[i].slot[0] <= (0x2fc / 4)) {
            fp->hdr[14] |= (1 << (a - 0x280 / 4)) & 0x07ff0000;
         } else {
            if (info->in[i].slot[c] < (0x040 / 4) ||
                info->in[i].slot[c] > (0x380 / 4))
               continue;
            a *= 2;
            if (info->in[i].slot[0] >= (0x300 / 4))
               a -= 32;
            fp->hdr[4 + a / 32] |= m << (a % 32);
         }
      }
   }

   for (i = 0; i < info->numOutputs; ++i) {
      if (info->out[i].sn == TGSI_SEMANTIC_COLOR)
         fp->hdr[18] |= 0xf << info->out[i].slot[0];
   }

   /* There are no "regular" attachments, but the shader still needs to be
    * executed. It seems like it wants to think that it has some color
    * outputs in order to actually run.
    */
   if (info->prop.fp.numColourResults == 0 && !info->prop.fp.writesDepth)
      fp->hdr[18] |= 0xf;

   fp->fp.early_z = info->prop.fp.earlyFragTests;
   fp->fp.sample_mask_in = info->prop.fp.usesSampleMaskIn;

   return 0;
}

static struct nvc0_transform_feedback_state *
nvc0_program_create_tfb_state(const struct nv50_ir_prog_info *info,
                              const struct pipe_stream_output_info *pso)
{
   struct nvc0_transform_feedback_state *tfb;
   unsigned b, i, c;

   tfb = MALLOC_STRUCT(nvc0_transform_feedback_state);
   if (!tfb)
      return NULL;
   for (b = 0; b < 4; ++b) {
      tfb->stride[b] = pso->stride[b] * 4;
      tfb->varying_count[b] = 0;
   }
   memset(tfb->varying_index, 0xff, sizeof(tfb->varying_index)); /* = skip */

   for (i = 0; i < pso->num_outputs; ++i) {
      unsigned s = pso->output[i].start_component;
      unsigned p = pso->output[i].dst_offset;
      b = pso->output[i].output_buffer;

      for (c = 0; c < pso->output[i].num_components; ++c)
         tfb->varying_index[b][p++] =
            info->out[pso->output[i].register_index].slot[s + c];

      tfb->varying_count[b] = MAX2(tfb->varying_count[b], p);
      tfb->stream[b] = pso->output[i].stream;
   }
   for (b = 0; b < 4; ++b) // zero unused indices (looks nicer)
      for (c = tfb->varying_count[b]; c & 3; ++c)
         tfb->varying_index[b][c] = 0;

   return tfb;
}

#ifdef DEBUG
static void
nvc0_program_dump(struct nvc0_program *prog)
{
   unsigned pos;

   if (prog->type != PIPE_SHADER_COMPUTE) {
      for (pos = 0; pos < ARRAY_SIZE(prog->hdr); ++pos)
         debug_printf("HDR[%02"PRIxPTR"] = 0x%08x\n",
                      pos * sizeof(prog->hdr[0]), prog->hdr[pos]);
   }
   debug_printf("shader binary code (0x%x bytes):", prog->code_size);
   for (pos = 0; pos < prog->code_size / 4; ++pos) {
      if ((pos % 8) == 0)
         debug_printf("\n");
      debug_printf("%08x ", prog->code[pos]);
   }
   debug_printf("\n");
}
#endif

bool
nvc0_program_translate(struct nvc0_program *prog, uint16_t chipset,
                       struct pipe_debug_callback *debug)
{
   struct nv50_ir_prog_info *info;
   int ret;

   info = CALLOC_STRUCT(nv50_ir_prog_info);
   if (!info)
      return false;

   info->type = prog->type;
   info->target = chipset;
   info->bin.sourceRep = NV50_PROGRAM_IR_TGSI;
   info->bin.source = (void *)prog->pipe.tokens;

   info->io.genUserClip = prog->vp.num_ucps;
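   /* Driver-internal data (user clip planes, draw info, MS sample info,
    * buffer/surface info, texture handles) lives in a reserved constant
    * buffer: c15[] here, switched to c7[] for compute on Kepler+ below.
    */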
   info->io.auxCBSlot = 15;
   info->io.msInfoCBSlot = 15;
   info->io.ucpBase = NVC0_CB_AUX_UCP_INFO;
   info->io.drawInfoBase = NVC0_CB_AUX_DRAW_INFO;
   info->io.msInfoBase = NVC0_CB_AUX_MS_INFO;
   info->io.bufInfoBase = NVC0_CB_AUX_BUF_INFO(0);
   info->io.suInfoBase = NVC0_CB_AUX_SU_INFO(0);
   if (chipset >= NVISA_GK104_CHIPSET) {
      info->io.texBindBase = NVC0_CB_AUX_TEX_INFO(0);
   }

   if (prog->type == PIPE_SHADER_COMPUTE) {
      if (chipset >= NVISA_GK104_CHIPSET) {
         info->io.auxCBSlot = 7;
         info->io.msInfoCBSlot = 7;
         info->io.uboInfoBase = NVC0_CB_AUX_UBO_INFO(0);
      }
      info->prop.cp.gridInfoBase = NVC0_CB_AUX_GRID_INFO(0);
   } else {
      info->io.sampleInfoBase = NVC0_CB_AUX_SAMPLE_INFO;
   }

   info->assignSlots = nvc0_program_assign_varying_slots;

#ifdef DEBUG
   info->optLevel = debug_get_num_option("NV50_PROG_OPTIMIZE", 3);
   info->dbgFlags = debug_get_num_option("NV50_PROG_DEBUG", 0);
#else
   info->optLevel = 3;
#endif

   ret = nv50_ir_generate_code(info);
   if (ret) {
      NOUVEAU_ERR("shader translation failed: %i\n", ret);
      goto out;
   }
   if (prog->type != PIPE_SHADER_COMPUTE)
      FREE(info->bin.syms);

   prog->code = info->bin.code;
   prog->code_size = info->bin.codeSize;
   prog->immd_data = info->immd.buf;
   prog->immd_size = info->immd.bufSize;
   prog->relocs = info->bin.relocData;
   prog->fixups = info->bin.fixupData;
   prog->num_gprs = MAX2(4, (info->bin.maxGPR + 1));
   prog->num_barriers = info->numBarriers;

   prog->vp.need_vertex_id = info->io.vertexId < PIPE_MAX_SHADER_INPUTS;
   prog->vp.need_draw_parameters = info->prop.vp.usesDrawParameters;

   if (info->io.edgeFlagOut < PIPE_MAX_ATTRIBS)
      info->out[info->io.edgeFlagOut].mask = 0; /* for headergen */
   prog->vp.edgeflag = info->io.edgeFlagIn;

   switch (prog->type) {
   case PIPE_SHADER_VERTEX:
      ret = nvc0_vp_gen_header(prog, info);
      break;
   case PIPE_SHADER_TESS_CTRL:
      ret = nvc0_tcp_gen_header(prog, info);
      break;
   case PIPE_SHADER_TESS_EVAL:
      ret = nvc0_tep_gen_header(prog, info);
      break;
   case PIPE_SHADER_GEOMETRY:
      ret = nvc0_gp_gen_header(prog, info);
      break;
   case PIPE_SHADER_FRAGMENT:
      ret = nvc0_fp_gen_header(prog, info);
      break;
   case PIPE_SHADER_COMPUTE:
      prog->cp.syms = info->bin.syms;
      prog->cp.num_syms = info->bin.numSyms;
      break;
   default:
      ret = -1;
      NOUVEAU_ERR("unknown program type: %u\n", prog->type);
      break;
   }
   if (ret)
      goto out;

   if (info->bin.tlsSpace) {
      assert(info->bin.tlsSpace < (1 << 24));
      prog->hdr[0] |= 1 << 26;
      prog->hdr[1] |= align(info->bin.tlsSpace, 0x10); /* l[] size */
      prog->need_tls = true;
   }
   /* TODO: factor 2 only needed where joinat/precont is used,
    * and we only have to count non-uniform branches
    */
   /*
   if ((info->maxCFDepth * 2) > 16) {
      prog->hdr[2] |= (((info->maxCFDepth * 2) + 47) / 48) * 0x200;
      prog->need_tls = true;
   }
   */
   if (info->io.globalAccess)
      prog->hdr[0] |= 1 << 26;
   if (info->io.globalAccess & 0x2)
      prog->hdr[0] |= 1 << 16;
   if (info->io.fp64)
      prog->hdr[0] |= 1 << 27;

   if (prog->pipe.stream_output.num_outputs)
      prog->tfb = nvc0_program_create_tfb_state(info,
                                                &prog->pipe.stream_output);

   pipe_debug_message(debug, SHADER_INFO,
                      "type: %d, local: %d, gpr: %d, inst: %d, bytes: %d",
                      prog->type, info->bin.tlsSpace, prog->num_gprs,
                      info->bin.instructions, info->bin.codeSize);

out:
   FREE(info);
   return !ret;
}

bool
nvc0_program_upload_code(struct nvc0_context *nvc0, struct nvc0_program *prog)
{
   struct nvc0_screen *screen = nvc0->screen;
   const bool is_cp = prog->type == PIPE_SHADER_COMPUTE;
   int ret;
   uint32_t size = prog->code_size + (is_cp ? 0 : NVC0_SHADER_HEADER_SIZE);
   uint32_t lib_pos = screen->lib_code->start;
   uint32_t code_pos;

   /* c[] bindings need to be aligned to 0x100, but we could use relocations
    * to save space. */
   if (prog->immd_size) {
      prog->immd_base = size;
      size = align(size, 0x40);
      size += prog->immd_size + 0xc0; /* add 0xc0 for align 0x40 -> 0x100 */
   }
   /* On Fermi, SP_START_ID must be aligned to 0x40.
    * On Kepler, the first instruction must be aligned to 0x80 because
    * latency information is expected only at certain positions.
    */
   if (screen->base.class_3d >= NVE4_3D_CLASS)
      size = size + (is_cp ? 0x40 : 0x70);
   size = align(size, 0x40);

   ret = nouveau_heap_alloc(screen->text_heap, size, prog, &prog->mem);
   if (ret) {
      struct nouveau_heap *heap = screen->text_heap;
      /* Note that the code library, which is allocated before anything else,
       * does not have a priv pointer. We can stop once we hit it.
       */
      while (heap->next && heap->next->priv) {
         struct nvc0_program *evict = heap->next->priv;
         nouveau_heap_free(&evict->mem);
      }
      debug_printf("WARNING: out of code space, evicting all shaders.\n");
      ret = nouveau_heap_alloc(heap, size, prog, &prog->mem);
      if (ret) {
         NOUVEAU_ERR("shader too large (0x%x) to fit in code space ?\n", size);
         return false;
      }
      IMMED_NVC0(nvc0->base.pushbuf, NVC0_3D(SERIALIZE), 0);
   }
   prog->code_base = prog->mem->start;
   prog->immd_base = align(prog->mem->start + prog->immd_base, 0x100);
   assert((prog->immd_size == 0) || (prog->immd_base + prog->immd_size <=
                                     prog->mem->start + prog->mem->size));

   if (!is_cp) {
      if (screen->base.class_3d >= NVE4_3D_CLASS) {
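         /* Pad code_base so that it plus the 0x50-byte shader header lands
          * on an 0x80 boundary, keeping the first instruction 0x80-aligned
          * as Kepler requires (see the comment above the size calculation).
          */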
         switch (prog->mem->start & 0xff) {
         case 0x40: prog->code_base += 0x70; break;
         case 0x80: prog->code_base += 0x30; break;
         case 0xc0: prog->code_base += 0x70; break;
         default:
            prog->code_base += 0x30;
            assert((prog->mem->start & 0xff) == 0x00);
            break;
         }
      }
      code_pos = prog->code_base + NVC0_SHADER_HEADER_SIZE;
   } else {
      if (screen->base.class_3d >= NVE4_3D_CLASS) {
         if (prog->mem->start & 0x40)
            prog->code_base += 0x40;
         assert((prog->code_base & 0x7f) == 0x00);
      }
      code_pos = prog->code_base;
   }

   if (prog->relocs)
      nv50_ir_relocate_code(prog->relocs, prog->code, code_pos, lib_pos, 0);
   if (prog->fixups) {
      nv50_ir_apply_fixups(prog->fixups, prog->code,
                           prog->fp.force_persample_interp,
                           prog->fp.flatshade,
                           0 /* alphatest */);
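      /* Patch the interpolation mode of the two colour inputs in hdr[14]
       * (2 bits per component: COLOR0 in bits 0..7, COLOR1 in bits 8..15) so
       * that forced flat shading takes effect without a recompile.
       */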
      for (int i = 0; i < 2; i++) {
         unsigned mask = prog->fp.color_interp[i] >> 4;
         unsigned interp = prog->fp.color_interp[i] & 3;
         if (!mask)
            continue;
         prog->hdr[14] &= ~(0xff << (8 * i));
         if (prog->fp.flatshade)
            interp = NVC0_INTERP_FLAT;
         for (int c = 0; c < 4; c++)
            if (mask & (1 << c))
               prog->hdr[14] |= interp << (2 * (4 * i + c));
      }
   }

#ifdef DEBUG
   if (debug_get_bool_option("NV50_PROG_DEBUG", false))
      nvc0_program_dump(prog);
#endif

   if (!is_cp)
      nvc0->base.push_data(&nvc0->base, screen->text, prog->code_base,
                           NV_VRAM_DOMAIN(&screen->base), NVC0_SHADER_HEADER_SIZE, prog->hdr);
   nvc0->base.push_data(&nvc0->base, screen->text, code_pos,
                        NV_VRAM_DOMAIN(&screen->base), prog->code_size, prog->code);
   if (prog->immd_size)
      nvc0->base.push_data(&nvc0->base,
                           screen->text, prog->immd_base, NV_VRAM_DOMAIN(&screen->base),
                           prog->immd_size, prog->immd_data);

   BEGIN_NVC0(nvc0->base.pushbuf, NVC0_3D(MEM_BARRIER), 1);
   PUSH_DATA (nvc0->base.pushbuf, 0x1011);

   return true;
}

/* Upload code for builtin functions like integer division emulation. */
void
nvc0_program_library_upload(struct nvc0_context *nvc0)
{
   struct nvc0_screen *screen = nvc0->screen;
   int ret;
   uint32_t size;
   const uint32_t *code;

   if (screen->lib_code)
      return;

   nv50_ir_get_target_library(screen->base.device->chipset, &code, &size);
   if (!size)
      return;

   ret = nouveau_heap_alloc(screen->text_heap, align(size, 0x100), NULL,
                            &screen->lib_code);
   if (ret)
      return;

   nvc0->base.push_data(&nvc0->base,
                        screen->text, screen->lib_code->start, NV_VRAM_DOMAIN(&screen->base),
                        size, code);
   /* no need for a memory barrier, will be emitted with first program */
}

void
nvc0_program_destroy(struct nvc0_context *nvc0, struct nvc0_program *prog)
{
   const struct pipe_shader_state pipe = prog->pipe;
   const ubyte type = prog->type;

   if (prog->mem)
      nouveau_heap_free(&prog->mem);
   FREE(prog->code); /* may be 0 for hardcoded shaders */
   FREE(prog->immd_data);
   FREE(prog->relocs);
   FREE(prog->fixups);
   if (prog->type == PIPE_SHADER_COMPUTE && prog->cp.syms)
      FREE(prog->cp.syms);
   if (prog->tfb) {
      if (nvc0->state.tfb == prog->tfb)
         nvc0->state.tfb = NULL;
      FREE(prog->tfb);
   }

   memset(prog, 0, sizeof(*prog));

   prog->pipe = pipe;
   prog->type = type;
}

uint32_t
nvc0_program_symbol_offset(const struct nvc0_program *prog, uint32_t label)
{
   const struct nv50_ir_prog_symbol *syms =
      (const struct nv50_ir_prog_symbol *)prog->cp.syms;
   unsigned base = 0;
   unsigned i;
   if (prog->type != PIPE_SHADER_COMPUTE)
      base = NVC0_SHADER_HEADER_SIZE;
   for (i = 0; i < prog->cp.num_syms; ++i)
      if (syms[i].label == label)
         return prog->code_base + base + syms[i].offset;
   return prog->code_base; /* no symbols or symbol not found */
}

void
nvc0_program_init_tcp_empty(struct nvc0_context *nvc0)
{
   struct ureg_program *ureg;

   ureg = ureg_create(PIPE_SHADER_TESS_CTRL);
   if (!ureg)
      return;

   ureg_property(ureg, TGSI_PROPERTY_TCS_VERTICES_OUT, 1);
   ureg_END(ureg);

   nvc0->tcp_empty = ureg_create_shader_and_destroy(ureg, &nvc0->base.pipe);
}