/*
 * Copyright 2012 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christoph Bumiller
 */
#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_compute.h"
#include "nvc0/nve4_compute.h"

#include "codegen/nv50_ir_driver.h"
static void nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *);
int
nve4_screen_compute_setup(struct nvc0_screen *screen,
                          struct nouveau_pushbuf *push)
{
   struct nouveau_device *dev = screen->base.device;
   struct nouveau_object *chan = screen->base.channel;
   int i;
   int ret;
   uint32_t obj_class;
   switch (dev->chipset & ~0xf) {
   case 0xf0:
      obj_class = NVF0_COMPUTE_CLASS; /* GK110 */
      break;
   case 0xe0:
      obj_class = NVE4_COMPUTE_CLASS; /* GK104 */
      break;
   default:
      NOUVEAU_ERR("unsupported chipset: NV%02x\n", dev->chipset);
      return -1;
   }

   ret = nouveau_object_new(chan, 0xbeef00c0, obj_class, NULL, 0,
                            &screen->compute);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate compute object: %d\n", ret);
      return ret;
   }
   ret = nouveau_bo_new(dev, NV_VRAM_DOMAIN(&screen->base), 0,
                        NVE4_CP_PARAM_SIZE, NULL, &screen->parm);
   if (ret)
      return ret;
   BEGIN_NVC0(push, SUBC_COMPUTE(NV01_SUBCHAN_OBJECT), 1);
   PUSH_DATA (push, screen->compute->oclass);

   BEGIN_NVC0(push, NVE4_COMPUTE(TEMP_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->tls->offset);
   PUSH_DATA (push, screen->tls->offset);
   /* No idea why there are 2. Divide size by 2 to be safe.
    * Actually this might be per-MP TEMP size and looks like I'm only using
    * 2 MPs instead of all 8.
    */
   BEGIN_NVC0(push, NVE4_COMPUTE(MP_TEMP_SIZE_HIGH(0)), 3);
   PUSH_DATAh(push, screen->tls->size / screen->mp_count);
   PUSH_DATA (push, (screen->tls->size / screen->mp_count) & ~0x7fff);
   PUSH_DATA (push, 0xff);
   BEGIN_NVC0(push, NVE4_COMPUTE(MP_TEMP_SIZE_HIGH(1)), 3);
   PUSH_DATAh(push, screen->tls->size / screen->mp_count);
   PUSH_DATA (push, (screen->tls->size / screen->mp_count) & ~0x7fff);
   PUSH_DATA (push, 0xff);
   /* Unified address space ? Who needs that ? Certainly not OpenCL.
    *
    * FATAL: Buffers with addresses inside [0x1000000, 0x3000000] will NOT be
    *  accessible. We cannot prevent that at the moment, so expect failure.
    */
   BEGIN_NVC0(push, NVE4_COMPUTE(LOCAL_BASE), 1);
   PUSH_DATA (push, 1 << 24);
   BEGIN_NVC0(push, NVE4_COMPUTE(SHARED_BASE), 1);
   PUSH_DATA (push, 2 << 24);
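   /* With these bases, l[] is windowed at generic address 0x01000000 and
    * s[] at 0x02000000, which is presumably why the [0x1000000, 0x3000000]
    * range mentioned above cannot be used for buffers.
    */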
   BEGIN_NVC0(push, NVE4_COMPUTE(CODE_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->text->offset);
   PUSH_DATA (push, screen->text->offset);

   BEGIN_NVC0(push, SUBC_COMPUTE(0x0310), 1);
   PUSH_DATA (push, (obj_class >= NVF0_COMPUTE_CLASS) ? 0x400 : 0x300);
   /* NOTE: these do not affect the state used by the 3D object */
   BEGIN_NVC0(push, NVE4_COMPUTE(TIC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset);
   PUSH_DATA (push, screen->txc->offset);
   PUSH_DATA (push, NVC0_TIC_MAX_ENTRIES - 1);
   BEGIN_NVC0(push, NVE4_COMPUTE(TSC_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->txc->offset + 65536);
   PUSH_DATA (push, screen->txc->offset + 65536);
   PUSH_DATA (push, NVC0_TSC_MAX_ENTRIES - 1);
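   /* The compute class keeps its own TIC/TSC bindings: both point into the
    * same txc buffer the 3D engine uses (TIC table at the base, TSC table
    * 64 KiB in), so texture headers and samplers are shared without
    * touching 3D state.
    */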
   if (obj_class >= NVF0_COMPUTE_CLASS) {
      BEGIN_NVC0(push, SUBC_COMPUTE(0x0248), 1);
      PUSH_DATA (push, 0x100);
      BEGIN_NIC0(push, SUBC_COMPUTE(0x0248), 63);
      for (i = 63; i >= 1; --i)
         PUSH_DATA(push, 0x38000 | i);
      IMMED_NVC0(push, SUBC_COMPUTE(NV50_GRAPH_SERIALIZE), 0);
      IMMED_NVC0(push, SUBC_COMPUTE(0x518), 0);
   }

   BEGIN_NVC0(push, NVE4_COMPUTE(TEX_CB_INDEX), 1);
   PUSH_DATA (push, 0); /* does not interfere with 3D */
   if (obj_class >= NVF0_COMPUTE_CLASS)
      IMMED_NVC0(push, SUBC_COMPUTE(0x02c4), 1);
   /* MS sample coordinate offsets: these do not work with _ALT modes ! */
   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_MS_OFFSETS);
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_MS_OFFSETS);
   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 64);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_COMPUTE(UPLOAD_EXEC), 17);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATA (push, 0); /* 0 */
   PUSH_DATA (push, 1); /* 1 */
   PUSH_DATA (push, 0); /* 2 */
   PUSH_DATA (push, 1); /* 3 */
   PUSH_DATA (push, 2); /* 4 */
   PUSH_DATA (push, 3); /* 5 */
   PUSH_DATA (push, 2); /* 6 */
   PUSH_DATA (push, 3); /* 7 */
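   /* This is the inline-upload idiom used throughout this file: point
    * UPLOAD_DST_ADDRESS at a spot in the parameter buffer, program
    * UPLOAD_LINE_LENGTH_IN with the byte count (and, presumably, a line
    * count of 1), then feed the payload through UPLOAD_EXEC with the
    * LINEAR flag so the data embedded in the pushbuf is copied to VRAM.
    */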
   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_TRAP_INFO_PTR);
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_TRAP_INFO_PTR);
   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 28);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_COMPUTE(UPLOAD_EXEC), 8);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_PARAM_TRAP_INFO);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_PARAM_TRAP_INFO);
   PUSH_DATA (push, screen->tls->offset);
   PUSH_DATAh(push, screen->tls->offset);
   PUSH_DATA (push, screen->tls->size / 2); /* MP TEMP block size */
   PUSH_DATA (push, screen->tls->size / 2 / 64); /* warp TEMP block size */
   PUSH_DATA (push, 0); /* warp cfstack size */
   BEGIN_NVC0(push, NVE4_COMPUTE(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);

   return 0;
}
static void
nve4_compute_validate_surfaces(struct nvc0_context *nvc0)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nv50_surface *sf;
   struct nv04_resource *res;
   uint32_t mask;
   unsigned i;
   const unsigned t = 1;

   mask = nvc0->surfaces_dirty[t];
   while (mask) {
      i = ffs(mask) - 1;
      mask &= ~(1 << i);

      /*
       * NVE4's surface load/store instructions receive all the information
       * directly instead of via binding points, so we have to supply them.
       */
      BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_DST_ADDRESS_HIGH), 2);
      PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_SUF(i));
      PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_SUF(i));
      BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_LINE_LENGTH_IN), 2);
      PUSH_DATA (push, 64);
      PUSH_DATA (push, 0x1);
      BEGIN_1IC0(push, NVE4_COMPUTE(UPLOAD_EXEC), 17);
      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));

      nve4_set_surface_info(push, nvc0->surfaces[t][i], screen);

      sf = nv50_surface(nvc0->surfaces[t][i]);
      if (sf) {
         res = nv04_resource(sf->base.texture);

         if (sf->base.writable)
            BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
         else
            BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
      }
   }
   if (nvc0->surfaces_dirty[t]) {
      BEGIN_NVC0(push, NVE4_COMPUTE(FLUSH), 1);
      PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
   }

   /* re-reference non-dirty surfaces */
   mask = nvc0->surfaces_valid[t] & ~nvc0->surfaces_dirty[t];
   while (mask) {
      i = ffs(mask) - 1;
      mask &= ~(1 << i);

      sf = nv50_surface(nvc0->surfaces[t][i]);
      res = nv04_resource(sf->base.texture);

      if (sf->base.writable)
         BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
      else
         BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
   }

   nvc0->surfaces_dirty[t] = 0;
}
/* Thankfully, textures with samplers follow the normal rules. */
static void
nve4_compute_validate_samplers(struct nvc0_context *nvc0)
{
   bool need_flush = nve4_validate_tsc(nvc0, 5);
   if (need_flush) {
      BEGIN_NVC0(nvc0->base.pushbuf, NVE4_COMPUTE(TSC_FLUSH), 1);
      PUSH_DATA (nvc0->base.pushbuf, 0);
   }
}
/* (Code duplicated at bottom for various non-convincing reasons.
 * E.g. we might want to use the COMPUTE subchannel to upload TIC/TSC
 * entries to avoid a subchannel switch.
 * Same for texture cache flushes.
 * Also, the bufctx differs, and more IFs in the 3D version looks ugly.)
 */
static void nve4_compute_validate_textures(struct nvc0_context *);
static void
nve4_compute_set_tex_handles(struct nvc0_context *nvc0)
{
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   uint64_t address;
   const unsigned s = nvc0_shader_stage(PIPE_SHADER_COMPUTE);
   unsigned i, n;
   uint32_t dirty = nvc0->textures_dirty[s] | nvc0->samplers_dirty[s];

   if (!dirty)
      return;
   i = ffs(dirty) - 1;
   n = util_logbase2(dirty) + 1 - i;

   address = nvc0->screen->parm->offset + NVE4_CP_INPUT_TEX(i);

   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, address);
   PUSH_DATA (push, address);
   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, n * 4);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_COMPUTE(UPLOAD_EXEC), 1 + n);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATAp(push, &nvc0->tex_handles[s][i], n);

   BEGIN_NVC0(push, NVE4_COMPUTE(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);

   nvc0->textures_dirty[s] = 0;
   nvc0->samplers_dirty[s] = 0;
}
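/* The packed handles uploaded above combine the TIC entry id (low bits, cf.
 * the NVE4_TIC_ENTRY_INVALID mask below) with the TSC entry id, so the
 * compute shader fetches a complete texture+sampler handle from the driver
 * constant buffer instead of using fixed binding slots.
 */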
static bool
nve4_compute_state_validate(struct nvc0_context *nvc0)
{
   if (!nvc0_compute_validate_program(nvc0))
      return false;

   if (nvc0->dirty_cp & NVC0_NEW_CP_TEXTURES)
      nve4_compute_validate_textures(nvc0);
   if (nvc0->dirty_cp & NVC0_NEW_CP_SAMPLERS)
      nve4_compute_validate_samplers(nvc0);
   if (nvc0->dirty_cp & (NVC0_NEW_CP_TEXTURES | NVC0_NEW_CP_SAMPLERS))
      nve4_compute_set_tex_handles(nvc0);
   if (nvc0->dirty_cp & NVC0_NEW_CP_SURFACES)
      nve4_compute_validate_surfaces(nvc0);
   if (nvc0->dirty_cp & NVC0_NEW_CP_GLOBALS)
      nvc0_validate_global_residents(nvc0,
                                     nvc0->bufctx_cp, NVC0_BIND_CP_GLOBAL);

   nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, false);

   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx_cp);
   if (unlikely(nouveau_pushbuf_validate(nvc0->base.pushbuf)))
      return false;
   if (unlikely(nvc0->state.flushed))
      nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, true);

   return true;
}
static void
nve4_compute_upload_input(struct nvc0_context *nvc0, const void *input,
                          const uint *block_layout,
                          const uint *grid_layout)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nvc0_program *cp = nvc0->compprog;

   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->parm->offset);
   PUSH_DATA (push, screen->parm->offset);
   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, cp->parm_size);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_COMPUTE(UPLOAD_EXEC), 1 + (cp->parm_size / 4));
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATAp(push, input, cp->parm_size / 4);

   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->parm->offset + NVE4_CP_INPUT_GRID_INFO(0));
   PUSH_DATA (push, screen->parm->offset + NVE4_CP_INPUT_GRID_INFO(0));
   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 7 * 4);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_COMPUTE(UPLOAD_EXEC), 1 + 7);
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
   PUSH_DATAp(push, block_layout, 3);
   PUSH_DATAp(push, grid_layout, 3);
   PUSH_DATA (push, 0);

   BEGIN_NVC0(push, NVE4_COMPUTE(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
}
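/* Layout of the parameter buffer (screen->parm) as used here: the user's
 * kernel arguments go at offset 0, and driver-generated data follows at the
 * NVE4_CP_INPUT_* offsets (grid/block dimensions, texture handles, surface
 * descriptors, MS sample offsets), all visible to the shader through the
 * driver constant buffer bound in the launch descriptor.
 */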
static inline uint8_t
nve4_compute_derive_cache_split(struct nvc0_context *nvc0, uint32_t shared_size)
{
   if (shared_size > (32 << 10))
      return NVC0_3D_CACHE_SPLIT_48K_SHARED_16K_L1;
   if (shared_size > (16 << 10))
      return NVE4_3D_CACHE_SPLIT_32K_SHARED_32K_L1;
   return NVC1_3D_CACHE_SPLIT_16K_SHARED_48K_L1;
}
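/* Each SMX has 64 KiB split between s[] and L1 cache; the helper above
 * picks the smallest shared-memory share (16/32/48 KiB) that still fits
 * the program's declared s[] usage, leaving the rest to L1.
 */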
static void
nve4_compute_setup_launch_desc(struct nvc0_context *nvc0,
                               struct nve4_cp_launch_desc *desc,
                               uint32_t label,
                               const uint *block_layout,
                               const uint *grid_layout)
{
   const struct nvc0_screen *screen = nvc0->screen;
   const struct nvc0_program *cp = nvc0->compprog;
   unsigned i;

   nve4_cp_launch_desc_init_default(desc);

   desc->entry = nvc0_program_symbol_offset(cp, label);

   desc->griddim_x = grid_layout[0];
   desc->griddim_y = grid_layout[1];
   desc->griddim_z = grid_layout[2];
   desc->blockdim_x = block_layout[0];
   desc->blockdim_y = block_layout[1];
   desc->blockdim_z = block_layout[2];

   desc->shared_size = align(cp->cp.smem_size, 0x100);
   desc->local_size_p = align(cp->cp.lmem_size, 0x10);
   desc->local_size_n = 0;
   desc->cstack_size = 0x800;
   desc->cache_split = nve4_compute_derive_cache_split(nvc0, cp->cp.smem_size);

   desc->gpr_alloc = cp->num_gprs;
   desc->bar_alloc = cp->num_barriers;

   for (i = 0; i < 7; ++i) {
      const unsigned s = 5;
      if (nvc0->constbuf[s][i].u.buf)
         nve4_cp_launch_desc_set_ctx_cb(desc, i + 1, &nvc0->constbuf[s][i]);
   }
   nve4_cp_launch_desc_set_cb(desc, 0, screen->parm, 0, NVE4_CP_INPUT_SIZE);
}
static inline struct nve4_cp_launch_desc *
nve4_compute_alloc_launch_desc(struct nouveau_context *nv,
                               struct nouveau_bo **pbo, uint64_t *pgpuaddr)
{
   uint8_t *ptr = nouveau_scratch_get(nv, 512, pgpuaddr, pbo);
   if (!ptr)
      return NULL;
   if (*pgpuaddr & 255) {
      unsigned adj = 256 - (*pgpuaddr & 255);
      ptr += adj;
      *pgpuaddr += adj;
   }
   return (struct nve4_cp_launch_desc *)ptr;
}
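/* 512 bytes are requested so that, after rounding the GPU address up to the
 * next 256-byte boundary, a full 256-byte descriptor still fits; the
 * alignment matters because LAUNCH_DESC_ADDRESS below only takes the
 * address shifted right by 8.
 */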
void
nve4_launch_grid(struct pipe_context *pipe,
                 const uint *block_layout, const uint *grid_layout,
                 uint32_t label, const void *input)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   struct nve4_cp_launch_desc *desc;
   uint64_t desc_gpuaddr;
   struct nouveau_bo *desc_bo;
   int ret;

   desc = nve4_compute_alloc_launch_desc(&nvc0->base, &desc_bo, &desc_gpuaddr);
   BCTX_REFN_bo(nvc0->bufctx_cp, CP_DESC, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
                desc_bo);

   ret = !nve4_compute_state_validate(nvc0);
   if (ret)
      goto out;

   nve4_compute_setup_launch_desc(nvc0, desc, label, block_layout, grid_layout);

   if (debug_get_num_option("NV50_PROG_DEBUG", 0))
      nve4_compute_dump_launch_desc(desc);

   nve4_compute_upload_input(nvc0, input, block_layout, grid_layout);

   /* upload descriptor and flush */
   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_DST_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, desc_gpuaddr);
   PUSH_DATA (push, desc_gpuaddr);
   BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_LINE_LENGTH_IN), 2);
   PUSH_DATA (push, 256);
   PUSH_DATA (push, 0x1);
   BEGIN_1IC0(push, NVE4_COMPUTE(UPLOAD_EXEC), 1 + (256 / 4));
   PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x08 << 1));
   PUSH_DATAp(push, (const uint32_t *)desc, 256 / 4);
   BEGIN_NVC0(push, NVE4_COMPUTE(FLUSH), 1);
   PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB | NVE4_COMPUTE_FLUSH_CODE);

   BEGIN_NVC0(push, NVE4_COMPUTE(LAUNCH_DESC_ADDRESS), 1);
   PUSH_DATA (push, desc_gpuaddr >> 8);
   BEGIN_NVC0(push, NVE4_COMPUTE(LAUNCH), 1);
   PUSH_DATA (push, 0x3);
   BEGIN_NVC0(push, SUBC_COMPUTE(NV50_GRAPH_SERIALIZE), 1);
   PUSH_DATA (push, 0);

out:
   if (ret)
      NOUVEAU_ERR("Failed to launch grid !\n");
   nouveau_scratch_done(&nvc0->base);
   nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_DESC);
}
#define NVE4_TIC_ENTRY_INVALID 0x000fffff
static void
nve4_compute_validate_textures(struct nvc0_context *nvc0)
{
   struct nouveau_bo *txc = nvc0->screen->txc;
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   const unsigned s = 5;
   unsigned i;
   uint32_t commands[2][NVE4_CP_INPUT_TEX_MAX];
   unsigned n[2] = { 0, 0 };

   for (i = 0; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
      struct nv04_resource *res;
      const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));

      if (!tic) {
         nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
         continue;
      }
      res = nv04_resource(tic->pipe.texture);

      if (tic->id < 0) {
         tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);

         PUSH_SPACE(push, 16);
         BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_DST_ADDRESS_HIGH), 2);
         PUSH_DATAh(push, txc->offset + (tic->id * 32));
         PUSH_DATA (push, txc->offset + (tic->id * 32));
         BEGIN_NVC0(push, NVE4_COMPUTE(UPLOAD_LINE_LENGTH_IN), 2);
         PUSH_DATA (push, 32);
         PUSH_DATA (push, 0x1);
         BEGIN_1IC0(push, NVE4_COMPUTE(UPLOAD_EXEC), 9);
         PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
         PUSH_DATAp(push, &tic->tic[0], 8);

         commands[0][n[0]++] = (tic->id << 4) | 1;
      } else
      if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
         commands[1][n[1]++] = (tic->id << 4) | 1;
      }
      nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);

      res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;

      nvc0->tex_handles[s][i] &= ~NVE4_TIC_ENTRY_INVALID;
      nvc0->tex_handles[s][i] |= tic->id;
      if (dirty)
         BCTX_REFN(nvc0->bufctx_cp, CP_TEX(i), res, RD);
   }
   for (; i < nvc0->state.num_textures[s]; ++i)
      nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;

   if (n[0]) {
      BEGIN_NIC0(push, NVE4_COMPUTE(TIC_FLUSH), n[0]);
      PUSH_DATAp(push, commands[0], n[0]);
   }
   if (n[1]) {
      BEGIN_NIC0(push, NVE4_COMPUTE(TEX_CACHE_CTL), n[1]);
      PUSH_DATAp(push, commands[1], n[1]);
   }

   nvc0->state.num_textures[s] = nvc0->num_textures[s];
}
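/* TIC_FLUSH invalidates the texture header cache for entries that were just
 * (re)written above, while TEX_CACHE_CTL appears to force a texture data
 * cache refetch for resources the GPU may still have been writing.
 */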
static const char *nve4_cache_split_name(unsigned value)
{
   switch (value) {
   case NVC1_3D_CACHE_SPLIT_16K_SHARED_48K_L1: return "16K_SHARED_48K_L1";
   case NVE4_3D_CACHE_SPLIT_32K_SHARED_32K_L1: return "32K_SHARED_32K_L1";
   case NVC0_3D_CACHE_SPLIT_48K_SHARED_16K_L1: return "48K_SHARED_16K_L1";
   default:
      return "(invalid)";
   }
}
static void
nve4_compute_dump_launch_desc(const struct nve4_cp_launch_desc *desc)
{
   const uint32_t *data = (const uint32_t *)desc;
   unsigned i;

   debug_printf("COMPUTE LAUNCH DESCRIPTOR:\n");

   /* raw dump; runs of zero words are collapsed into a single ellipsis */
   for (i = 0; i < sizeof(*desc); i += 4) {
      if (data[i / 4]) {
         debug_printf("[%x]: 0x%08x\n", i, data[i / 4]);
      } else
      if (i == 0 || data[i / 4 - 1]) {
         debug_printf("...\n");
      }
   }

   debug_printf("entry = 0x%x\n", desc->entry);
   debug_printf("grid dimensions = %ux%ux%u\n",
                desc->griddim_x, desc->griddim_y, desc->griddim_z);
   debug_printf("block dimensions = %ux%ux%u\n",
                desc->blockdim_x, desc->blockdim_y, desc->blockdim_z);
   debug_printf("s[] size: 0x%x\n", desc->shared_size);
   debug_printf("l[] size: -0x%x / +0x%x\n",
                desc->local_size_n, desc->local_size_p);
   debug_printf("stack size: 0x%x\n", desc->cstack_size);
   debug_printf("barrier count: %u\n", desc->bar_alloc);
   debug_printf("$r count: %u\n", desc->gpr_alloc);
   debug_printf("cache split: %s\n", nve4_cache_split_name(desc->cache_split));

   for (i = 0; i < 8; ++i) {
      uint64_t address;
      uint32_t size = desc->cb[i].size;
      bool valid = !!(desc->cb_mask & (1 << i));

      address = ((uint64_t)desc->cb[i].address_h << 32) | desc->cb[i].address_l;

      if (!valid && !address && !size)
         continue;
      debug_printf("CB[%u]: address = 0x%"PRIx64", size 0x%x%s\n",
                   i, address, size, valid ? "" : " (invalid)");
   }
}
#ifdef NOUVEAU_NVE4_MP_TRAP_HANDLER
static void
nve4_compute_trap_info(struct nvc0_context *nvc0)
{
   struct nvc0_screen *screen = nvc0->screen;
   struct nouveau_bo *bo = screen->parm;
   int ret, i;
   volatile struct nve4_mp_trap_info *info;
   uint8_t *map;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_RDWR, nvc0->base.client);
   if (ret)
      return;
   map = (uint8_t *)bo->map;
   info = (volatile struct nve4_mp_trap_info *)(map + NVE4_CP_PARAM_TRAP_INFO);

   debug_printf("trapstat = %08x\n", info->trapstat);
   debug_printf("warperr = %08x\n", info->warperr);
   debug_printf("PC = %x\n", info->pc);
   debug_printf("tid = %u %u %u\n",
                info->tid[0], info->tid[1], info->tid[2]);
   debug_printf("ctaid = %u %u %u\n",
                info->ctaid[0], info->ctaid[1], info->ctaid[2]);
   for (i = 0; i <= 63; ++i)
      debug_printf("$r%i = %08x\n", i, info->r[i]);
   for (i = 0; i <= 6; ++i)
      debug_printf("$p%i = %i\n", i, (info->flags >> i) & 1);
   debug_printf("$c = %x\n", info->flags >> 12);
}
#endif /* NOUVEAU_NVE4_MP_TRAP_HANDLER */