/*
 * Copyright 2012 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christoph Bumiller
 */
#include "nvc0/nvc0_context.h"
#include "nvc0/nve4_compute.h"

#include "codegen/nv50_ir_driver.h"
static void nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *);

int
nve4_screen_compute_setup(struct nvc0_screen *screen,
                          struct nouveau_pushbuf *push)
{
   struct nouveau_device *dev = screen->base.device;
   struct nouveau_object *chan = screen->base.channel;
   int i;
   int ret;
   uint32_t obj_class;

   switch (dev->chipset & ~0xf) {
   case 0xf0:
      obj_class = NVF0_COMPUTE_CLASS; /* GK110 */
      break;
   case 0xe0:
      obj_class = NVE4_COMPUTE_CLASS; /* GK104 */
      break;
   case 0x110:
      obj_class = GM107_COMPUTE_CLASS;
      break;
   default:
      NOUVEAU_ERR("unsupported chipset: NV%02x\n", dev->chipset);
      return -1;
   }

   ret = nouveau_object_new(chan, 0xbeef00c0, obj_class, NULL, 0,
                            &screen->compute);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate compute object: %d\n", ret);
      return ret;
   }

   ret = nouveau_bo_new(dev, NV_VRAM_DOMAIN(&screen->base), 0,
                        NVE4_CP_PARAM_SIZE, NULL, &screen->parm);
   if (ret)
      return ret;

   BEGIN_NVC0(push, SUBC_CP(NV01_SUBCHAN_OBJECT), 1);
   PUSH_DATA (push, screen->compute->oclass);
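
   /* The compute object is now bound to its subchannel; all NVE4_CP methods
    * below are directed at it.
    */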

   BEGIN_NVC0(push, NVE4_CP(TEMP_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->tls->offset);
   PUSH_DATA (push, screen->tls->offset);
   /* No idea why there are 2. Divide size by 2 to be safe.
    * Actually this might be per-MP TEMP size and looks like I'm only using
    * 2 MPs instead of all 8.
    */
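   /* For example, with tls->size == 16 MiB and mp_count == 8, each MP would
    * get 2 MiB of TEMP; the ~0x7fff mask below rounds the size down to a
    * 32 KiB multiple.
    */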
   BEGIN_NVC0(push, NVE4_CP(MP_TEMP_SIZE_HIGH(0)), 3);
   PUSH_DATAh(push, screen->tls->size / screen->mp_count);
   PUSH_DATA (push, (screen->tls->size / screen->mp_count) & ~0x7fff);
   PUSH_DATA (push, 0xff);
   BEGIN_NVC0(push, NVE4_CP(MP_TEMP_SIZE_HIGH(1)), 3);
   PUSH_DATAh(push, screen->tls->size / screen->mp_count);
   PUSH_DATA (push, (screen->tls->size / screen->mp_count) & ~0x7fff);
   PUSH_DATA (push, 0xff);

   /* Unified address space? Who needs that? Certainly not OpenCL.
    *
    * FATAL: Buffers with addresses inside [0x1000000, 0x3000000] will NOT be
    * accessible. We cannot prevent that at the moment, so expect failure.
    */
   BEGIN_NVC0(push, NVE4_CP(LOCAL_BASE), 1);
   PUSH_DATA (push, 1 << 24);
   BEGIN_NVC0(push, NVE4_CP(SHARED_BASE), 1);
   PUSH_DATA (push, 2 << 24);
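   /* These bases put l[] at 0x1000000 and s[] at 0x2000000 in the compute
    * address space, which is exactly the window the FATAL note above refers
    * to.
    */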

   BEGIN_NVC0(push, NVE4_CP(CODE_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->text->offset);
   PUSH_DATA (push, screen->text->offset);

   BEGIN_NVC0(push, SUBC_CP(0x0310), 1);
   PUSH_DATA (push, (obj_class >= NVF0_COMPUTE_CLASS) ? 0x400 : 0x300);

   /* NOTE: these do not affect the state used by the 3D object */
   BEGIN_NVC0(push, NVE4_CP(TIC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset);
   PUSH_DATA (push, screen->txc->offset);
   PUSH_DATA (push, NVC0_TIC_MAX_ENTRIES - 1);
   BEGIN_NVC0(push, NVE4_CP(TSC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset + 65536);
   PUSH_DATA (push, screen->txc->offset + 65536);
   PUSH_DATA (push, NVC0_TSC_MAX_ENTRIES - 1);
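   /* The TSC table is placed 64 KiB (65536 bytes) past the TIC table in the
    * same txc buffer.
    */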

   if (obj_class >= NVF0_COMPUTE_CLASS) {
      /* The blob calls GK110_COMPUTE.FIRMWARE[0x6], along with the args (0x1)
       * passed with GK110_COMPUTE.GRAPH.SCRATCH[0x2]. This is currently
       * disabled because our firmware doesn't support these commands and the
       * GPU hangs if they are used. */
      BEGIN_NIC0(push, SUBC_CP(0x0248), 64);
      for (i = 63; i >= 0; i--)
         PUSH_DATA(push, 0x38000 | i);
      IMMED_NVC0(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 0);
   }

   BEGIN_NVC0(push, NVE4_CP(TEX_CB_INDEX), 1);
   PUSH_DATA (push, 0); /* does not interfere with 3D */

   if (obj_class == NVF0_COMPUTE_CLASS)
      IMMED_NVC0(push, SUBC_CP(0x02c4), 1);

   /* MS sample coordinate offsets: these do not work with _ALT modes! */
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_MS_OFFSETS);
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_MS_OFFSETS);
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 64);
   PUSH_DATA (push, 1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 17);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATA (push, 0); /* 0 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 1); /* 1 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 0); /* 2 */
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 1); /* 3 */
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 2); /* 4 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 3); /* 5 */
   PUSH_DATA (push, 0);
   PUSH_DATA (push, 2); /* 6 */
   PUSH_DATA (push, 1);
   PUSH_DATA (push, 3); /* 7 */
   PUSH_DATA (push, 1);
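
   /* Eight (x, y) sample offset pairs; presumably read by the shader from
    * the input constbuf when resolving per-sample coordinates.
    */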

   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_TRAP_INFO_PTR);
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_TRAP_INFO_PTR);
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 28);
   PUSH_DATA (push, 1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 8);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_PARAM_TRAP_INFO);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_PARAM_TRAP_INFO);
   PUSH_DATA (push, screen->tls->offset);
   PUSH_DATAh(push, screen->tls->offset);
   PUSH_DATA (push, screen->tls->size / 2); /* MP TEMP block size */
   PUSH_DATA (push, screen->tls->size / 2 / 64); /* warp TEMP block size */
   PUSH_DATA (push, 0); /* warp cfstack size */

   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);

   return 0;
}

static void
nve4_compute_validate_surfaces(struct nvc0_context *nvc0)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nv50_surface *sf;
   struct nv04_resource *res;
   uint32_t mask;
   unsigned i;
   const unsigned t = 1;

   mask = nvc0->surfaces_dirty[t];
   while (mask) {
      i = ffs(mask) - 1;
      mask &= ~(1 << i);

      /* NVE4's surface load/store instructions receive all the information
       * directly instead of via binding points, so we have to supply it.
       */
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_SUF(i));
      PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_SUF(i));
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, 64);
      PUSH_DATA (push, 1);
      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 17);
      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));

      nve4_set_surface_info(push, nvc0->surfaces[t][i], screen);

      sf = nv50_surface(nvc0->surfaces[t][i]);
      if (sf) {
         res = nv04_resource(sf->base.texture);

         if (sf->base.writable)
            BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
         else
            BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
      }
   }
   if (nvc0->surfaces_dirty[t]) {
      BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
      PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
   }

   /* re-reference non-dirty surfaces */
   mask = nvc0->surfaces_valid[t] & ~nvc0->surfaces_dirty[t];
   while (mask) {
      i = ffs(mask) - 1;
      mask &= ~(1 << i);

      sf = nv50_surface(nvc0->surfaces[t][i]);
      res = nv04_resource(sf->base.texture);

      if (sf->base.writable)
         BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
      else
         BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
   }

   nvc0->surfaces_dirty[t] = 0;
}

/* Thankfully, textures with samplers follow the normal rules. */
static void
nve4_compute_validate_samplers(struct nvc0_context *nvc0)
{
   bool need_flush = nve4_validate_tsc(nvc0, 5);
   if (need_flush) {
      BEGIN_NVC0(nvc0->base.pushbuf, NVE4_CP(TSC_FLUSH), 1);
      PUSH_DATA (nvc0->base.pushbuf, 0);
   }
}

/* (Code duplicated at bottom for various non-convincing reasons.
 * E.g. we might want to use the COMPUTE subchannel to upload TIC/TSC
 * entries to avoid a subchannel switch.
 * Same for texture cache flushes.
 * Also, the bufctx differs, and more IFs in the 3D version look ugly.)
 */
static void nve4_compute_validate_textures(struct nvc0_context *);

static void
nve4_compute_set_tex_handles(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   uint64_t address;
   const unsigned s = nvc0_shader_stage(PIPE_SHADER_COMPUTE);
   unsigned i, n;
   uint32_t dirty = nvc0->textures_dirty[s] | nvc0->samplers_dirty[s];

   if (!dirty)
      return;
   i = ffs(dirty) - 1;
   n = util_logbase2(dirty) + 1 - i;

   address = nvc0->screen->parm->offset + NVE4_CP_INPUT_TEX(i);
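
   /* i is the lowest dirty slot and n extends to the highest one, so a
    * single linear upload covers every dirty handle in between.
    */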
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, address);
   PUSH_DATA (push, address);
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, n * 4);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + n);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATAp(push, &nvc0->tex_handles[s][i], n);

   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);

   nvc0->textures_dirty[s] = 0;
   nvc0->samplers_dirty[s] = 0;
}

static struct nvc0_state_validate
validate_list_cp[] = {
   { nvc0_compprog_validate,          NVC0_NEW_CP_PROGRAM  },
   { nve4_compute_validate_textures,  NVC0_NEW_CP_TEXTURES },
   { nve4_compute_validate_samplers,  NVC0_NEW_CP_SAMPLERS },
   { nve4_compute_set_tex_handles,    NVC0_NEW_CP_TEXTURES |
                                      NVC0_NEW_CP_SAMPLERS },
   { nve4_compute_validate_surfaces,  NVC0_NEW_CP_SURFACES },
   { nvc0_compute_validate_globals,   NVC0_NEW_CP_GLOBALS  },
};
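
/* Note the ordering: nve4_compute_set_tex_handles runs after the texture and
 * sampler validation that (re)assigns the TIC/TSC ids it uploads.
 */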

static bool
nve4_state_validate_cp(struct nvc0_context *nvc0, uint32_t mask)
{
   bool ret;

   ret = nvc0_state_validate(nvc0, mask, validate_list_cp,
                             ARRAY_SIZE(validate_list_cp), &nvc0->dirty_cp,
                             nvc0->bufctx_cp);

   if (unlikely(nvc0->state.flushed))
      nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, true);
   return ret;
}

static void
nve4_compute_upload_input(struct nvc0_context *nvc0, const void *input,
                          const uint *block_layout,
                          const uint *grid_layout)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_program *cp = nvc0->compprog;

   if (cp->parm_size) {
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, screen->parm->offset);
      PUSH_DATA (push, screen->parm->offset);
      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, cp->parm_size);
      PUSH_DATA (push, 0x1);
      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (cp->parm_size / 4));
      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
      PUSH_DATAp(push, input, cp->parm_size / 4);
   }
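
   /* Launch geometry also goes into the driver's input constbuf, alongside
    * the user parameters.
    */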
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_GRID_INFO(0));
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_GRID_INFO(0));
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 7 * 4);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 7);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATAp(push, block_layout, 3);
   PUSH_DATAp(push, grid_layout, 3);
   PUSH_DATA (push, 0);

   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
}
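
/* The MP's 64 KiB are split between s[] and L1 cache; pick the smallest s[]
 * partition that still fits the program's declared shared memory size.
 */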
static inline uint8_t
nve4_compute_derive_cache_split(struct nvc0_context *nvc0, uint32_t shared_size)
{
   if (shared_size > (32 << 10))
      return NVC0_3D_CACHE_SPLIT_48K_SHARED_16K_L1;
   if (shared_size > (16 << 10))
      return NVE4_3D_CACHE_SPLIT_32K_SHARED_32K_L1;
   return NVC1_3D_CACHE_SPLIT_16K_SHARED_48K_L1;
}

static void
nve4_compute_setup_launch_desc(struct nvc0_context *nvc0,
                               struct nve4_cp_launch_desc *desc,
                               uint32_t label,
                               const uint *block_layout,
                               const uint *grid_layout)
{
   const struct nvc0_screen *screen = nvc0->screen;
   const struct nvc0_program *cp = nvc0->compprog;
   unsigned i;

   nve4_cp_launch_desc_init_default(desc);

   desc->entry = nvc0_program_symbol_offset(cp, label);

   desc->griddim_x = grid_layout[0];
   desc->griddim_y = grid_layout[1];
   desc->griddim_z = grid_layout[2];
   desc->blockdim_x = block_layout[0];
   desc->blockdim_y = block_layout[1];
   desc->blockdim_z = block_layout[2];

   desc->shared_size = align(cp->cp.smem_size, 0x100);
   desc->local_size_p = align(cp->cp.lmem_size, 0x10);
   desc->local_size_n = 0;
   desc->cstack_size = 0x800;
   desc->cache_split = nve4_compute_derive_cache_split(nvc0, cp->cp.smem_size);

   desc->gpr_alloc = cp->num_gprs;
   desc->bar_alloc = cp->num_barriers;
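
   /* CB slot 0 is reserved for the driver's input constbuf (bound below),
    * so user constbufs are shifted up by one slot.
    */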
   for (i = 0; i < 7; ++i) {
      const unsigned s = 5;
      if (nvc0->constbuf[s][i].u.buf)
         nve4_cp_launch_desc_set_ctx_cb(desc, i + 1, &nvc0->constbuf[s][i]);
   }
   nve4_cp_launch_desc_set_cb(desc, 0, screen->parm, 0, NVE4_CP_INPUT_SIZE);
}

static inline struct nve4_cp_launch_desc *
nve4_compute_alloc_launch_desc(struct nouveau_context *nv,
                               struct nouveau_bo **pbo, uint64_t *pgpuaddr)
{
   uint8_t *ptr = nouveau_scratch_get(nv, 512, pgpuaddr, pbo);
   if (!ptr)
      return NULL;
   if (*pgpuaddr & 255) {
      unsigned adj = 256 - (*pgpuaddr & 255);
      ptr += adj;
      *pgpuaddr += adj;
   }
   return (struct nve4_cp_launch_desc *)ptr;
}
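
/* The launch descriptor address is programmed as (gpuaddr >> 8), so it must
 * be 256 byte aligned; 512 bytes are requested above to leave room for the
 * round-up.
 */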
void
nve4_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nve4_cp_launch_desc *desc;
   uint64_t desc_gpuaddr;
   struct nouveau_bo *desc_bo;
   int ret;

   desc = nve4_compute_alloc_launch_desc(&nvc0->base, &desc_bo, &desc_gpuaddr);
   if (!desc) {
      ret = -1;
      goto out;
   }
   BCTX_REFN_bo(nvc0->bufctx_cp, CP_DESC, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                desc_bo);

   ret = !nve4_state_validate_cp(nvc0, ~0);
   if (ret)
      goto out;

   nve4_compute_setup_launch_desc(nvc0, desc, info->pc,
                                  info->block, info->grid);

   if (debug_get_num_option("NV50_PROG_DEBUG", 0))
      nve4_compute_dump_launch_desc(desc);

   nve4_compute_upload_input(nvc0, info->input, info->block, info->grid);

   /* upload descriptor and flush */
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, desc_gpuaddr);
   PUSH_DATA (push, desc_gpuaddr);
   BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 256);
   PUSH_DATA (push, 1);
   BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + (256 / 4));
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x08 << 1));
   PUSH_DATAp(push, (const uint32_t *)desc, 256 / 4);
   BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB | NVE4_COMPUTE_FLUSH_CODE);

   BEGIN_NVC0(push, NVE4_CP(LAUNCH_DESC_ADDRESS), 1);
   PUSH_DATA (push, desc_gpuaddr >> 8);
   BEGIN_NVC0(push, NVE4_CP(LAUNCH), 1);
   PUSH_DATA (push, 0x3);
   BEGIN_NVC0(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 1);
   PUSH_DATA (push, 0);

out:
   if (ret)
      NOUVEAU_ERR("Failed to launch grid!\n");
   nouveau_scratch_done(&nvc0->base);
   nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_DESC);
}

#define NVE4_TIC_ENTRY_INVALID 0x000fffff
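
/* Compute texture handles pack the TIC and TSC ids; the bits covered by this
 * mask are presumably the TIC part, cleared and refilled with tic->id below.
 */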

static void
nve4_compute_validate_textures(struct nvc0_context *nvc0)
{
   struct nouveau_bo *txc = nvc0->screen->txc;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   const unsigned s = 5;
   unsigned i;
   uint32_t commands[2][NVE4_CP_INPUT_TEX_MAX];
   unsigned n[2] = { 0, 0 };

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;
      const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));

      if (!tic) {
         nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
         continue;
      }
      res = nv04_resource(tic->pipe.texture);

      if (tic->id < 0) {
         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         PUSH_SPACE(push, 16);
         BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
         PUSH_DATAh(push, txc->offset + (tic->id * 32));
         PUSH_DATA (push, txc->offset + (tic->id * 32));
         BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 1);
         BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 9);
         PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
         PUSH_DATAp(push, &tic->tic[0], 8);

         commands[0][n[0]++] = (tic->id << 4) | 1;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         commands[1][n[1]++] = (tic->id << 4) | 1;
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;

      nvc0->tex_handles[s][i] &= ~NVE4_TIC_ENTRY_INVALID;
      nvc0->tex_handles[s][i] |= tic->id;
      if (dirty)
         BCTX_REFN(nvc0->bufctx_cp, CP_TEX(i), res, RD);
   }
   for (; i < nvc0->state.num_textures[s]; ++i)
      nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;

   if (n[0]) {
      BEGIN_NIC0(push, NVE4_CP(TIC_FLUSH), n[0]);
      PUSH_DATAp(push, commands[0], n[0]);
   }
   if (n[1]) {
      BEGIN_NIC0(push, NVE4_CP(TEX_CACHE_CTL), n[1]);
      PUSH_DATAp(push, commands[1], n[1]);
   }

   nvc0->state.num_textures[s] = nvc0->num_textures[s];
}

static const char *nve4_cache_split_name(unsigned value)
{
   switch (value) {
   case NVC1_3D_CACHE_SPLIT_16K_SHARED_48K_L1: return "16K_SHARED_48K_L1";
   case NVE4_3D_CACHE_SPLIT_32K_SHARED_32K_L1: return "32K_SHARED_32K_L1";
   case NVC0_3D_CACHE_SPLIT_48K_SHARED_16K_L1: return "48K_SHARED_16K_L1";
   default:
      return "(invalid)";
   }
}

static void
nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *desc)
{
   const uint32_t *data = (const uint32_t *)desc;
   unsigned i;
   bool zero = false;

   debug_printf("COMPUTE LAUNCH DESCRIPTOR:\n");

   for (i = 0; i < sizeof(*desc); i += 4) {
      if (data[i / 4]) {
         debug_printf("[%x]: 0x%08x\n", i, data[i / 4]);
         zero = false;
      } else
      if (!zero) {
         debug_printf("...\n");
         zero = true;
      }
   }

   debug_printf("entry = 0x%x\n", desc->entry);
   debug_printf("grid dimensions = %ux%ux%u\n",
                desc->griddim_x, desc->griddim_y, desc->griddim_z);
   debug_printf("block dimensions = %ux%ux%u\n",
                desc->blockdim_x, desc->blockdim_y, desc->blockdim_z);
   debug_printf("s[] size: 0x%x\n", desc->shared_size);
   debug_printf("l[] size: -0x%x / +0x%x\n",
                desc->local_size_n, desc->local_size_p);
   debug_printf("stack size: 0x%x\n", desc->cstack_size);
   debug_printf("barrier count: %u\n", desc->bar_alloc);
   debug_printf("$r count: %u\n", desc->gpr_alloc);
   debug_printf("cache split: %s\n", nve4_cache_split_name(desc->cache_split));

   for (i = 0; i < 8; ++i) {
      uint64_t address;
      uint32_t size = desc->cb[i].size;
      bool valid = !!(desc->cb_mask & (1 << i));

      address = ((uint64_t)desc->cb[i].address_h << 32) | desc->cb[i].address_l;

      if (!valid && !address && !size)
         continue;
      debug_printf("CB[%u]: address = 0x%"PRIx64", size 0x%x%s\n",
                   i, address, size, valid ? "" : " (invalid)");
   }
}

#ifdef NOUVEAU_NVE4_MP_TRAP_HANDLER
static void
nve4_compute_trap_info(struct nvc0_context *nvc0)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_bo *bo = screen->parm;
   int ret, i;
   volatile struct nve4_mp_trap_info *info;
   uint8_t *map;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_RDWR, nvc0->base.client);
   if (ret)
      return;
   map = (uint8_t *)bo->map;
   info = (volatile struct nve4_mp_trap_info *)(map + NVE4_CP_PARAM_TRAP_INFO);
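
   /* This reads back the nve4_mp_trap_info block that the trap handler
    * writes at NVE4_CP_PARAM_TRAP_INFO, the same offset whose pointer was
    * uploaded to the input constbuf in nve4_screen_compute_setup().
    */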
   debug_printf("trapstat = %08x\n", info->trapstat);
   debug_printf("warperr = %08x\n", info->warperr);
   debug_printf("PC = %x\n", info->pc);
   debug_printf("tid = %u %u %u\n",
                info->tid[0], info->tid[1], info->tid[2]);
   debug_printf("ctaid = %u %u %u\n",
                info->ctaid[0], info->ctaid[1], info->ctaid[2]);
   for (i = 0; i <= 63; ++i)
      debug_printf("$r%i = %08x\n", i, info->r[i]);
   for (i = 0; i <= 6; ++i)
      debug_printf("$p%i = %i\n", i, (info->flags >> i) & 1);
   debug_printf("$c = %x\n", info->flags >> 12);
}
#endif