src/gallium/drivers/nouveau/nvc0/nve4_compute.c
/*
 * Copyright 2012 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christoph Bumiller
 */

#include "nvc0/nvc0_context.h"
#include "nvc0/nve4_compute.h"

#include "codegen/nv50_ir_driver.h"

#ifdef DEBUG
static void nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *);
#endif


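/* One-time initialization of the compute engine at screen creation: bind the
 * chipset's compute class, program the TEMP/code/TIC/TSC base addresses, and
 * pre-upload the MS sample coordinate offsets (plus, with DEBUG, the trap
 * info pointers) into the driver's parameter buffer.
 */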
int
nve4_screen_compute_setup(struct nvc0_screen *screen,
                          struct nouveau_pushbuf *push)
{
   struct nouveau_device *dev = screen->base.device;
   struct nouveau_object *chan = screen->base.channel;
   int i;
   int ret;
   uint32_t obj_class;

   switch (dev->chipset & ~0xf) {
   case 0x100:
   case 0xf0:
      obj_class = NVF0_COMPUTE_CLASS; /* GK110 */
      break;
   case 0xe0:
      obj_class = NVE4_COMPUTE_CLASS; /* GK104 */
      break;
   case 0x110:
      obj_class = GM107_COMPUTE_CLASS;
      break;
   default:
      NOUVEAU_ERR("unsupported chipset: NV%02x\n", dev->chipset);
      return -1;
   }

   ret = nouveau_object_new(chan, 0xbeef00c0, obj_class, NULL, 0,
                            &screen->compute);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate compute object: %d\n", ret);
      return ret;
   }

   ret = nouveau_bo_new(dev, NV_VRAM_DOMAIN(&screen->base), 0, NVE4_CP_PARAM_SIZE, NULL,
                        &screen->parm);
   if (ret)
      return ret;

   BEGIN_NVC0(push, SUBC_CP(NV01_SUBCHAN_OBJECT), 1);
   PUSH_DATA (push, screen->compute->oclass);

   BEGIN_NVC0(push, NVE4_CP(TEMP_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->tls->offset);
   PUSH_DATA (push, screen->tls->offset);
   /* No idea why there are 2. Divide size by 2 to be safe.
    * Actually this might be per-MP TEMP size and looks like I'm only using
    * 2 MPs instead of all 8.
    */
   BEGIN_NVC0(push, NVE4_CP(MP_TEMP_SIZE_HIGH(0)), 3);
   PUSH_DATAh(push, screen->tls->size / screen->mp_count);
   PUSH_DATA (push, (screen->tls->size / screen->mp_count) & ~0x7fff);
   PUSH_DATA (push, 0xff);
   BEGIN_NVC0(push, NVE4_CP(MP_TEMP_SIZE_HIGH(1)), 3);
   PUSH_DATAh(push, screen->tls->size / screen->mp_count);
   PUSH_DATA (push, (screen->tls->size / screen->mp_count) & ~0x7fff);
   PUSH_DATA (push, 0xff);

   /* Unified address space ? Who needs that ? Certainly not OpenCL.
    *
    * FATAL: Buffers with addresses inside [0x1000000, 0x3000000] will NOT be
    * accessible. We cannot prevent that at the moment, so expect failure.
    */
   BEGIN_NVC0(push, NVE4_CP(LOCAL_BASE), 1);
   PUSH_DATA (push, 1 << 24);
   BEGIN_NVC0(push, NVE4_CP(SHARED_BASE), 1);
   PUSH_DATA (push, 2 << 24);

   BEGIN_NVC0(push, NVE4_CP(CODE_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->text->offset);
   PUSH_DATA (push, screen->text->offset);

   BEGIN_NVC0(push, SUBC_CP(0x0310), 1);
   PUSH_DATA (push, (obj_class >= NVF0_COMPUTE_CLASS) ? 0x400 : 0x300);

   /* NOTE: these do not affect the state used by the 3D object */
   BEGIN_NVC0(push, NVE4_CP(TIC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset);
   PUSH_DATA (push, screen->txc->offset);
   PUSH_DATA (push, NVC0_TIC_MAX_ENTRIES - 1);
   BEGIN_NVC0(push, NVE4_CP(TSC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset + 65536);
   PUSH_DATA (push, screen->txc->offset + 65536);
   PUSH_DATA (push, NVC0_TSC_MAX_ENTRIES - 1);

   if (obj_class >= NVF0_COMPUTE_CLASS) {
      /* The blob calls GK110_COMPUTE.FIRMWARE[0x6], along with the args (0x1)
       * passed with GK110_COMPUTE.GRAPH.SCRATCH[0x2]. This is currently
       * disabled because our firmware doesn't support these commands and the
       * GPU hangs if they are used. */
      BEGIN_NIC0(push, SUBC_CP(0x0248), 64);
      for (i = 63; i >= 0; i--)
         PUSH_DATA(push, 0x38000 | i);
      IMMED_NVC0(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 0);
   }

   BEGIN_NVC0(push, NVE4_CP(TEX_CB_INDEX), 1);
   PUSH_DATA (push, 0); /* does not interfere with 3D */

   if (obj_class == NVF0_COMPUTE_CLASS)
      IMMED_NVC0(push, SUBC_CP(0x02c4), 1);

   /* MS sample coordinate offsets: these do not work with _ALT modes ! */
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_MS_OFFSETS);
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_MS_OFFSETS);
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 64);
   PUSH_DATA (push, 1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 17);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATA (push, 0); /* 0 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 1); /* 1 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 0); /* 2 */
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 1); /* 3 */
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 2); /* 4 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 3); /* 5 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 2); /* 6 */
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 3); /* 7 */
   PUSH_DATA (push, 1);

#ifdef DEBUG
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_TRAP_INFO_PTR);
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_TRAP_INFO_PTR);
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 28);
   PUSH_DATA (push, 1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 8);
   PUSH_DATA (push, 1);
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_PARAM_TRAP_INFO);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_PARAM_TRAP_INFO);
   PUSH_DATA (push, screen->tls->offset);
   PUSH_DATAh(push, screen->tls->offset);
   PUSH_DATA (push, screen->tls->size / 2); /* MP TEMP block size */
   PUSH_DATA (push, screen->tls->size / 2 / 64); /* warp TEMP block size */
   PUSH_DATA (push, 0); /* warp cfstack size */
#endif

   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);

   return 0;
}


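/* Upload the surface descriptors for dirty compute image/surface slots into
 * the compute input constant buffer and reference their backing resources.
 */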
static void
nve4_compute_validate_surfaces(struct nvc0_context *nvc0)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nv50_surface *sf;
   struct nv04_resource *res;
   uint32_t mask;
   unsigned i;
   const unsigned t = 1;

   mask = nvc0->surfaces_dirty[t];
   while (mask) {
      i = ffs(mask) - 1;
      mask &= ~(1 << i);

      /*
       * NVE4's surface load/store instructions receive all the information
       * directly instead of via binding points, so we have to supply them.
       */
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_SUF(i));
      PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_SUF(i));
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, 64);
      PUSH_DATA (push, 1);
      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 17);
      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));

      nve4_set_surface_info(push, nvc0->surfaces[t][i], screen);

      sf = nv50_surface(nvc0->surfaces[t][i]);
      if (sf) {
         res = nv04_resource(sf->base.texture);

         if (sf->base.writable)
            BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
         else
            BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
      }
   }
   if (nvc0->surfaces_dirty[t]) {
      BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
      PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
   }

   /* re-reference non-dirty surfaces */
   mask = nvc0->surfaces_valid[t] & ~nvc0->surfaces_dirty[t];
   while (mask) {
      i = ffs(mask) - 1;
      mask &= ~(1 << i);

      sf = nv50_surface(nvc0->surfaces[t][i]);
      res = nv04_resource(sf->base.texture);

      if (sf->base.writable)
         BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
      else
         BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
   }

   nvc0->surfaces_dirty[t] = 0;
}


/* Thankfully, textures with samplers follow the normal rules. */
static void
nve4_compute_validate_samplers(struct nvc0_context *nvc0)
{
   bool need_flush = nve4_validate_tsc(nvc0, 5);
   if (need_flush) {
      BEGIN_NVC0(nvc0->base.pushbuf, NVE4_CP(TSC_FLUSH), 1);
      PUSH_DATA (nvc0->base.pushbuf, 0);
   }
}
/* (Code duplicated at bottom for various non-convincing reasons.
 * E.g. we might want to use the COMPUTE subchannel to upload TIC/TSC
 * entries to avoid a subchannel switch.
 * Same for texture cache flushes.
 * Also, the bufctx differs, and more IFs in the 3D version looks ugly.)
 */
static void nve4_compute_validate_textures(struct nvc0_context *);

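/* Upload the combined texture/sampler handles for the dirty slot range into
 * the compute input constant buffer (NVE4_CP_INPUT_TEX), then flush the CB.
 */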
static void
nve4_compute_set_tex_handles(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   uint64_t address;
   const unsigned s = nvc0_shader_stage(PIPE_SHADER_COMPUTE);
   unsigned i, n;
   uint32_t dirty = nvc0->textures_dirty[s] | nvc0->samplers_dirty[s];

   if (!dirty)
      return;
   i = ffs(dirty) - 1;
   n = util_logbase2(dirty) + 1 - i;
   assert(n);

   address = nvc0->screen->parm->offset + NVE4_CP_INPUT_TEX(i);

   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, address);
   PUSH_DATA (push, address);
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, n * 4);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + n);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATAp(push, &nvc0->tex_handles[s][i], n);

   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);

   nvc0->textures_dirty[s] = 0;
   nvc0->samplers_dirty[s] = 0;
}


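/* Validate all compute state flagged dirty before a launch and make the
 * referenced buffers resident; returns false if push buffer validation fails.
 */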
static bool
nve4_compute_state_validate(struct nvc0_context *nvc0)
{
   nvc0_compprog_validate(nvc0);
   if (nvc0->dirty_cp & NVC0_NEW_CP_TEXTURES)
      nve4_compute_validate_textures(nvc0);
   if (nvc0->dirty_cp & NVC0_NEW_CP_SAMPLERS)
      nve4_compute_validate_samplers(nvc0);
   if (nvc0->dirty_cp & (NVC0_NEW_CP_TEXTURES | NVC0_NEW_CP_SAMPLERS))
      nve4_compute_set_tex_handles(nvc0);
   if (nvc0->dirty_cp & NVC0_NEW_CP_SURFACES)
      nve4_compute_validate_surfaces(nvc0);
   if (nvc0->dirty_cp & NVC0_NEW_CP_GLOBALS)
      nvc0_compute_validate_globals(nvc0);

   nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, false);

   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx_cp);
   if (unlikely(nouveau_pushbuf_validate(nvc0->base.pushbuf)))
      return false;
   if (unlikely(nvc0->state.flushed))
      nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, true);

   return true;
}


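/* Upload the user kernel parameters (if any) and the block/grid dimensions
 * into the compute input constant buffer, then flush the CB cache.
 */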
static void
nve4_compute_upload_input(struct nvc0_context *nvc0, const void *input,
                          const uint *block_layout,
                          const uint *grid_layout)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_program *cp = nvc0->compprog;

   if (cp->parm_size) {
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, screen->parm->offset);
      PUSH_DATA (push, screen->parm->offset);
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, cp->parm_size);
      PUSH_DATA (push, 0x1);
      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (cp->parm_size / 4));
      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
      PUSH_DATAp(push, input, cp->parm_size / 4);
   }
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_GRID_INFO(0));
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_GRID_INFO(0));
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 7 * 4);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 7);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATAp(push, block_layout, 3);
   PUSH_DATAp(push, grid_layout, 3);
   PUSH_DATA (push, 0);

   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
}

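/* Pick the shared memory / L1 cache split that provides at least the
 * kernel's declared shared memory usage (16, 32 or 48 KiB shared).
 */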
static inline uint8_t
nve4_compute_derive_cache_split(struct nvc0_context *nvc0, uint32_t shared_size)
{
   if (shared_size > (32 << 10))
      return NVC0_3D_CACHE_SPLIT_48K_SHARED_16K_L1;
   if (shared_size > (16 << 10))
      return NVE4_3D_CACHE_SPLIT_32K_SHARED_32K_L1;
   return NVC1_3D_CACHE_SPLIT_16K_SHARED_48K_L1;
}

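/* Fill in the compute launch descriptor: kernel entry point, grid/block
 * dimensions, shared/local/stack sizes, register and barrier allocation,
 * and the constant buffer bindings (cb0 is the driver's parameter buffer).
 */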
static void
nve4_compute_setup_launch_desc(struct nvc0_context *nvc0,
                               struct nve4_cp_launch_desc *desc,
                               uint32_t label,
                               const uint *block_layout,
                               const uint *grid_layout)
{
   const struct nvc0_screen *screen = nvc0->screen;
   const struct nvc0_program *cp = nvc0->compprog;
   unsigned i;

   nve4_cp_launch_desc_init_default(desc);

   desc->entry = nvc0_program_symbol_offset(cp, label);

   desc->griddim_x = grid_layout[0];
   desc->griddim_y = grid_layout[1];
   desc->griddim_z = grid_layout[2];
   desc->blockdim_x = block_layout[0];
   desc->blockdim_y = block_layout[1];
   desc->blockdim_z = block_layout[2];

   desc->shared_size = align(cp->cp.smem_size, 0x100);
   desc->local_size_p = align(cp->cp.lmem_size, 0x10);
   desc->local_size_n = 0;
   desc->cstack_size = 0x800;
   desc->cache_split = nve4_compute_derive_cache_split(nvc0, cp->cp.smem_size);

   desc->gpr_alloc = cp->num_gprs;
   desc->bar_alloc = cp->num_barriers;

   for (i = 0; i < 7; ++i) {
      const unsigned s = 5;
      if (nvc0->constbuf[s][i].u.buf)
         nve4_cp_launch_desc_set_ctx_cb(desc, i + 1, &nvc0->constbuf[s][i]);
   }
   nve4_cp_launch_desc_set_cb(desc, 0, screen->parm, 0, NVE4_CP_INPUT_SIZE);
}

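/* Allocate a 256-byte-aligned launch descriptor from the scratch allocator;
 * the 512-byte request leaves room to round the returned pointer and GPU
 * address up to the next 256-byte boundary.
 */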
static inline struct nve4_cp_launch_desc *
nve4_compute_alloc_launch_desc(struct nouveau_context *nv,
                               struct nouveau_bo **pbo, uint64_t *pgpuaddr)
{
   uint8_t *ptr = nouveau_scratch_get(nv, 512, pgpuaddr, pbo);
   if (!ptr)
      return NULL;
   if (*pgpuaddr & 255) {
      unsigned adj = 256 - (*pgpuaddr & 255);
      ptr += adj;
      *pgpuaddr += adj;
   }
   return (struct nve4_cp_launch_desc *)ptr;
}

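/* pipe_context::launch_grid for Kepler: allocate and fill a launch
 * descriptor, validate compute state, upload the kernel inputs, and kick off
 * the grid via LAUNCH_DESC_ADDRESS + LAUNCH.
 */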
void
nve4_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nve4_cp_launch_desc *desc;
   uint64_t desc_gpuaddr;
   struct nouveau_bo *desc_bo;
   int ret;

   desc = nve4_compute_alloc_launch_desc(&nvc0->base, &desc_bo, &desc_gpuaddr);
   if (!desc) {
      ret = -1;
      goto out;
   }
   BCTX_REFN_bo(nvc0->bufctx_cp, CP_DESC, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                desc_bo);

   ret = !nve4_compute_state_validate(nvc0);
   if (ret)
      goto out;

   nve4_compute_setup_launch_desc(nvc0, desc, info->pc,
                                  info->block, info->grid);
#ifdef DEBUG
   if (debug_get_num_option("NV50_PROG_DEBUG", 0))
      nve4_compute_dump_launch_desc(desc);
#endif

   nve4_compute_upload_input(nvc0, info->input, info->block, info->grid);

   /* upload descriptor and flush */
#if 0
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, desc_gpuaddr);
   PUSH_DATA (push, desc_gpuaddr);
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 256);
   PUSH_DATA (push, 1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (256 / 4));
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x08 << 1));
   PUSH_DATAp(push, (const uint32_t *)desc, 256 / 4);
   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB | NVE4_COMPUTE_FLUSH_CODE);
#endif
   BEGIN_NVC0(push, NVE4_CP(LAUNCH_DESC_ADDRESS), 1);
   PUSH_DATA (push, desc_gpuaddr >> 8);
   BEGIN_NVC0(push, NVE4_CP(LAUNCH), 1);
   PUSH_DATA (push, 0x3);
   BEGIN_NVC0(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 1);
   PUSH_DATA (push, 0);

out:
   if (ret)
      NOUVEAU_ERR("Failed to launch grid !\n");
   nouveau_scratch_done(&nvc0->base);
   nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_DESC);
}


#define NVE4_TIC_ENTRY_INVALID 0x000fffff

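/* Compute-side texture validation: upload new TIC entries through the
 * COMPUTE upload path, queue the required TIC/texture cache flushes, and
 * update the texture handles used by nve4_compute_set_tex_handles().
 */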
static void
nve4_compute_validate_textures(struct nvc0_context *nvc0)
{
   struct nouveau_bo *txc = nvc0->screen->txc;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   const unsigned s = 5;
   unsigned i;
   uint32_t commands[2][NVE4_CP_INPUT_TEX_MAX];
   unsigned n[2] = { 0, 0 };

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;
      const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));

      if (!tic) {
         nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
         continue;
      }
      res = nv04_resource(tic->pipe.texture);

      if (tic->id < 0) {
         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         PUSH_SPACE(push, 16);
         BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
         PUSH_DATAh(push, txc->offset + (tic->id * 32));
         PUSH_DATA (push, txc->offset + (tic->id * 32));
         BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 9);
         PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
         PUSH_DATAp(push, &tic->tic[0], 8);

         commands[0][n[0]++] = (tic->id << 4) | 1;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         commands[1][n[1]++] = (tic->id << 4) | 1;
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;

      nvc0->tex_handles[s][i] &= ~NVE4_TIC_ENTRY_INVALID;
      nvc0->tex_handles[s][i] |= tic->id;
      if (dirty)
         BCTX_REFN(nvc0->bufctx_cp, CP_TEX(i), res, RD);
   }
   for (; i < nvc0->state.num_textures[s]; ++i)
      nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;

   if (n[0]) {
      BEGIN_NIC0(push, NVE4_CP(TIC_FLUSH), n[0]);
      PUSH_DATAp(push, commands[0], n[0]);
   }
   if (n[1]) {
      BEGIN_NIC0(push, NVE4_CP(TEX_CACHE_CTL), n[1]);
      PUSH_DATAp(push, commands[1], n[1]);
   }

   nvc0->state.num_textures[s] = nvc0->num_textures[s];
}


#ifdef DEBUG
static const char *nve4_cache_split_name(unsigned value)
{
   switch (value) {
   case NVC1_3D_CACHE_SPLIT_16K_SHARED_48K_L1: return "16K_SHARED_48K_L1";
   case NVE4_3D_CACHE_SPLIT_32K_SHARED_32K_L1: return "32K_SHARED_32K_L1";
   case NVC0_3D_CACHE_SPLIT_48K_SHARED_16K_L1: return "48K_SHARED_16K_L1";
   default:
      return "(invalid)";
   }
}

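/* Debug helper: dump the non-zero raw words of a launch descriptor followed
 * by a decoded summary of its fields and constant buffer bindings.
 */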
static void
nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *desc)
{
   const uint32_t *data = (const uint32_t *)desc;
   unsigned i;
   bool zero = false;

   debug_printf("COMPUTE LAUNCH DESCRIPTOR:\n");

   for (i = 0; i < sizeof(*desc); i += 4) {
      if (data[i / 4]) {
         debug_printf("[%x]: 0x%08x\n", i, data[i / 4]);
         zero = false;
      } else
      if (!zero) {
         debug_printf("...\n");
         zero = true;
      }
   }

   debug_printf("entry = 0x%x\n", desc->entry);
   debug_printf("grid dimensions = %ux%ux%u\n",
                desc->griddim_x, desc->griddim_y, desc->griddim_z);
   debug_printf("block dimensions = %ux%ux%u\n",
                desc->blockdim_x, desc->blockdim_y, desc->blockdim_z);
   debug_printf("s[] size: 0x%x\n", desc->shared_size);
   debug_printf("l[] size: -0x%x / +0x%x\n",
                desc->local_size_n, desc->local_size_p);
   debug_printf("stack size: 0x%x\n", desc->cstack_size);
   debug_printf("barrier count: %u\n", desc->bar_alloc);
   debug_printf("$r count: %u\n", desc->gpr_alloc);
   debug_printf("cache split: %s\n", nve4_cache_split_name(desc->cache_split));

   for (i = 0; i < 8; ++i) {
      uint64_t address;
      uint32_t size = desc->cb[i].size;
      bool valid = !!(desc->cb_mask & (1 << i));

      address = ((uint64_t)desc->cb[i].address_h << 32) | desc->cb[i].address_l;

      if (!valid && !address && !size)
         continue;
      debug_printf("CB[%u]: address = 0x%"PRIx64", size 0x%x%s\n",
                   i, address, size, valid ? "" : " (invalid)");
   }
}
#endif

#ifdef NOUVEAU_NVE4_MP_TRAP_HANDLER
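/* Read back and print the trap information written into the parameter buffer
 * (see the DEBUG upload of NVE4_CP_INPUT_TRAP_INFO_PTR above), then clear
 * the lock word so the next trap can be reported.
 */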
static void
nve4_compute_trap_info(struct nvc0_context *nvc0)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_bo *bo = screen->parm;
   int ret, i;
   volatile struct nve4_mp_trap_info *info;
   uint8_t *map;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_RDWR, nvc0->base.client);
   if (ret)
      return;
   map = (uint8_t *)bo->map;
   info = (volatile struct nve4_mp_trap_info *)(map + NVE4_CP_PARAM_TRAP_INFO);

   if (info->lock) {
      debug_printf("trapstat = %08x\n", info->trapstat);
      debug_printf("warperr = %08x\n", info->warperr);
      debug_printf("PC = %x\n", info->pc);
      debug_printf("tid = %u %u %u\n",
                   info->tid[0], info->tid[1], info->tid[2]);
      debug_printf("ctaid = %u %u %u\n",
                   info->ctaid[0], info->ctaid[1], info->ctaid[2]);
      for (i = 0; i <= 63; ++i)
         debug_printf("$r%i = %08x\n", i, info->r[i]);
      for (i = 0; i <= 6; ++i)
         debug_printf("$p%i = %i\n", i, (info->flags >> i) & 1);
      debug_printf("$c = %x\n", info->flags >> 12);
   }
   info->lock = 0;
}
#endif