vc4: Update to current kernel sources.
[mesa.git] / src / gallium / drivers / vc4 / vc4_context.c
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <xf86drm.h>
#include <err.h>

#include "pipe/p_defines.h"
#include "util/ralloc.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_blitter.h"
#include "indices/u_primconvert.h"
#include "pipe/p_screen.h"

#include "vc4_screen.h"
#include "vc4_context.h"
#include "vc4_resource.h"

/**
 * Emits a no-op STORE_TILE_BUFFER_GENERAL.
 *
 * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of
 * some sort before another load is triggered.
 */
static void
vc4_store_before_load(struct vc4_context *vc4, bool *coords_emitted)
{
        if (!*coords_emitted)
                return;

        cl_u8(&vc4->rcl, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
        cl_u8(&vc4->rcl, VC4_LOADSTORE_TILE_BUFFER_NONE);
        cl_u8(&vc4->rcl, (VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR |
                          VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR |
                          VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR));
        cl_u32(&vc4->rcl, 0); /* no address, since we're in None mode */

        *coords_emitted = false;
}

/**
 * Emits a PACKET_TILE_COORDINATES if one isn't already pending.
 *
 * The tile coordinates packet triggers a pending load if there is one, is
 * used for clipping during rendering, and determines where loads/stores
 * happen relative to their base address.
 */
static void
vc4_tile_coordinates(struct vc4_context *vc4, uint32_t x, uint32_t y,
                     bool *coords_emitted)
{
        if (*coords_emitted)
                return;

        cl_u8(&vc4->rcl, VC4_PACKET_TILE_COORDINATES);
        cl_u8(&vc4->rcl, x);
        cl_u8(&vc4->rcl, y);

        *coords_emitted = true;
}

static void
vc4_setup_rcl(struct vc4_context *vc4)
{
        struct vc4_surface *csurf = vc4_surface(vc4->framebuffer.cbufs[0]);
        struct vc4_resource *ctex = csurf ? vc4_resource(csurf->base.texture) : NULL;
        struct vc4_surface *zsurf = vc4_surface(vc4->framebuffer.zsbuf);
        struct vc4_resource *ztex = zsurf ? vc4_resource(zsurf->base.texture) : NULL;

        if (!csurf)
                vc4->resolve &= ~PIPE_CLEAR_COLOR0;
        if (!zsurf)
                vc4->resolve &= ~(PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL);
        uint32_t resolve_uncleared = vc4->resolve & ~vc4->cleared;
        uint32_t width = vc4->framebuffer.width;
        uint32_t height = vc4->framebuffer.height;
        uint32_t stride_in_tiles = align(width, 64) / 64;

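        /* vc4_flush() already dropped frames whose draw bounds were empty,
         * so by the time we build an RCL the bounds must have been set.
         */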
        assert(vc4->draw_min_x != ~0 && vc4->draw_min_y != ~0);
        uint32_t min_x_tile = vc4->draw_min_x / 64;
        uint32_t min_y_tile = vc4->draw_min_y / 64;
        uint32_t max_x_tile = (vc4->draw_max_x - 1) / 64;
        uint32_t max_y_tile = (vc4->draw_max_y - 1) / 64;
        uint32_t xtiles = max_x_tile - min_x_tile + 1;
        uint32_t ytiles = max_y_tile - min_y_tile + 1;

#if 0
        fprintf(stderr, "RCL: resolve 0x%x clear 0x%x resolve uncleared 0x%x\n",
                vc4->resolve,
                vc4->cleared,
                resolve_uncleared);
#endif

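        /* Reserve worst-case space for the RCL up front: the fixed clear,
         * config, and initial store packets, plus each tile's loads, stores,
         * tile coordinates, and branch into the binner-generated sub-list.
         */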
        uint32_t reloc_size = 9;
        uint32_t clear_size = 14;
        uint32_t config_size = 11 + reloc_size;
        uint32_t loadstore_size = 7 + reloc_size;
        uint32_t tilecoords_size = 3;
        uint32_t branch_size = 5 + reloc_size;
        uint32_t color_store_size = 1;
        cl_ensure_space(&vc4->rcl,
                        clear_size +
                        config_size +
                        loadstore_size +
                        xtiles * ytiles * (loadstore_size * 4 +
                                           tilecoords_size * 3 +
                                           branch_size +
                                           color_store_size));

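        /* Set the values the tile buffer will take when it gets cleared.
         * The packet takes the clear color twice (likely to cover a 64bpp
         * tile buffer mode), followed by the clear depth and stencil.
         */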
        cl_u8(&vc4->rcl, VC4_PACKET_CLEAR_COLORS);
        cl_u32(&vc4->rcl, vc4->clear_color[0]);
        cl_u32(&vc4->rcl, vc4->clear_color[1]);
        cl_u32(&vc4->rcl, vc4->clear_depth);
        cl_u8(&vc4->rcl, vc4->clear_stencil);

        /* The rendering mode config determines the pointer that's used for
         * VC4_PACKET_STORE_MS_TILE_BUFFER address computations.  The kernel
         * could handle a no-relocation rendering mode config and deny those
         * packets, but instead we just tell the kernel we're doing our color
         * rendering to the Z buffer, and don't emit any of those packets.
         */
        struct vc4_surface *render_surf = csurf ? csurf : zsurf;
        struct vc4_resource *render_tex = vc4_resource(render_surf->base.texture);
        cl_start_reloc(&vc4->rcl, 1);
        cl_u8(&vc4->rcl, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
        cl_reloc(vc4, &vc4->rcl, render_tex->bo, render_surf->offset);
        cl_u16(&vc4->rcl, width);
        cl_u16(&vc4->rcl, height);
        cl_u16(&vc4->rcl, ((render_surf->tiling <<
                            VC4_RENDER_CONFIG_MEMORY_FORMAT_SHIFT) |
                           (vc4_rt_format_is_565(render_surf->base.format) ?
                            VC4_RENDER_CONFIG_FORMAT_BGR565 :
                            VC4_RENDER_CONFIG_FORMAT_RGBA8888)));

        /* The tile buffer normally gets cleared when the previous tile is
         * stored.  If the clear values changed between frames, then the tile
         * buffer has stale clear values in it, so we have to do a store in
         * None mode (no writes) so that we trigger the tile buffer clear.
         *
         * Excess clearing is only a performance cost, since per-tile contents
         * will be loaded/stored in the loop below.
         */
        if (vc4->cleared & (PIPE_CLEAR_COLOR0 |
                            PIPE_CLEAR_DEPTH |
                            PIPE_CLEAR_STENCIL)) {
                cl_u8(&vc4->rcl, VC4_PACKET_TILE_COORDINATES);
                cl_u8(&vc4->rcl, 0);
                cl_u8(&vc4->rcl, 0);

                cl_u8(&vc4->rcl, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
                cl_u16(&vc4->rcl, VC4_LOADSTORE_TILE_BUFFER_NONE);
                cl_u32(&vc4->rcl, 0); /* no address, since we're in None mode */
        }

        uint32_t color_hindex = ctex ? vc4_gem_hindex(vc4, ctex->bo) : 0;
        uint32_t depth_hindex = ztex ? vc4_gem_hindex(vc4, ztex->bo) : 0;
        uint32_t tile_alloc_hindex = vc4_gem_hindex(vc4, vc4->tile_alloc);

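        /* Walk the tiles covered by the draw bounds.  Each tile loads any
         * buffer contents that have to be preserved (resolved but not
         * cleared this frame), emits its tile coordinates, branches into the
         * binner-generated sub-list for that tile, and then stores out the
         * buffers being resolved.  The last store of the last tile signals
         * end of frame.
         */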
        for (int y = min_y_tile; y <= max_y_tile; y++) {
                for (int x = min_x_tile; x <= max_x_tile; x++) {
                        bool end_of_frame = (x == max_x_tile &&
                                             y == max_y_tile);
                        bool coords_emitted = false;

                        /* Note that the load doesn't actually occur until the
                         * tile coords packet is processed, and only one load
                         * may be outstanding at a time.
                         */
                        if (resolve_uncleared & PIPE_CLEAR_COLOR) {
                                vc4_store_before_load(vc4, &coords_emitted);

                                cl_start_reloc(&vc4->rcl, 1);
                                cl_u8(&vc4->rcl, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
                                cl_u8(&vc4->rcl,
                                      VC4_LOADSTORE_TILE_BUFFER_COLOR |
                                      (csurf->tiling <<
                                       VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT));
                                cl_u8(&vc4->rcl,
                                      vc4_rt_format_is_565(csurf->base.format) ?
                                      VC4_LOADSTORE_TILE_BUFFER_BGR565 :
                                      VC4_LOADSTORE_TILE_BUFFER_RGBA8888);
                                cl_reloc_hindex(&vc4->rcl, color_hindex,
                                                csurf->offset);

                                vc4_tile_coordinates(vc4, x, y, &coords_emitted);
                        }

                        if (resolve_uncleared & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
                                vc4_store_before_load(vc4, &coords_emitted);

                                cl_start_reloc(&vc4->rcl, 1);
                                cl_u8(&vc4->rcl, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
                                cl_u8(&vc4->rcl,
                                      VC4_LOADSTORE_TILE_BUFFER_ZS |
                                      (zsurf->tiling <<
                                       VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT));
                                cl_u8(&vc4->rcl, 0);
                                cl_reloc_hindex(&vc4->rcl, depth_hindex,
                                                zsurf->offset);

                                vc4_tile_coordinates(vc4, x, y, &coords_emitted);
                        }

                        /* Clipping depends on tile coordinates having been
                         * emitted, so make sure it's happened even if
                         * everything was cleared to start.
                         */
                        vc4_tile_coordinates(vc4, x, y, &coords_emitted);

                        /* Wait for the binner before jumping to the first
                         * tile's lists.
                         */
                        if (x == min_x_tile && y == min_y_tile)
                                cl_u8(&vc4->rcl, VC4_PACKET_WAIT_ON_SEMAPHORE);

                        cl_start_reloc(&vc4->rcl, 1);
                        cl_u8(&vc4->rcl, VC4_PACKET_BRANCH_TO_SUB_LIST);
                        cl_reloc_hindex(&vc4->rcl, tile_alloc_hindex,
                                        (y * stride_in_tiles + x) * 32);

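                        /* Store out the buffers being resolved.  Only the
                         * frame's final store may carry the end-of-frame
                         * flag, so the Z/S store sets it only when no color
                         * store follows.
                         */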
                        if (vc4->resolve & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
                                vc4_tile_coordinates(vc4, x, y, &coords_emitted);

                                cl_start_reloc(&vc4->rcl, 1);
                                cl_u8(&vc4->rcl, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
                                cl_u8(&vc4->rcl,
                                      VC4_LOADSTORE_TILE_BUFFER_ZS |
                                      (zsurf->tiling <<
                                       VC4_LOADSTORE_TILE_BUFFER_FORMAT_SHIFT));
                                cl_u8(&vc4->rcl,
                                      VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR);
                                cl_reloc_hindex(&vc4->rcl, depth_hindex,
                                                zsurf->offset |
                                                ((end_of_frame &&
                                                  !(vc4->resolve & PIPE_CLEAR_COLOR0)) ?
                                                 VC4_LOADSTORE_TILE_BUFFER_EOF : 0));

                                coords_emitted = false;
                        }

                        if (vc4->resolve & PIPE_CLEAR_COLOR0) {
                                vc4_tile_coordinates(vc4, x, y, &coords_emitted);
                                if (end_of_frame) {
                                        cl_u8(&vc4->rcl,
                                              VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF);
                                } else {
                                        cl_u8(&vc4->rcl,
                                              VC4_PACKET_STORE_MS_TILE_BUFFER);
                                }

                                coords_emitted = false;
                        }

                        /* At least one of the bits that would have triggered
                         * an EOF needs to have been set.
                         */
                        assert(vc4->resolve & (PIPE_CLEAR_COLOR0 |
                                               PIPE_CLEAR_DEPTH |
                                               PIPE_CLEAR_STENCIL));
                        /* Any coords emitted must also have been consumed by
                         * a store.
                         */
                        assert(!coords_emitted);
                }
        }

        if (vc4->resolve & PIPE_CLEAR_COLOR0)
                ctex->writes++;

        if (vc4->resolve & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))
                ztex->writes++;
}

static void
vc4_draw_reset(struct vc4_context *vc4)
{
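        /* Drop the reference held on each BO named in this frame's submit.
         * bo_handles is a CL of 32-bit handles with a parallel array of
         * struct vc4_bo pointers in bo_pointers, so the entry count is the
         * handle CL's size in bytes divided by 4.
         */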
        struct vc4_bo **referenced_bos = vc4->bo_pointers.base;
        for (int i = 0; i < (vc4->bo_handles.next -
                             vc4->bo_handles.base) / 4; i++) {
                vc4_bo_unreference(&referenced_bos[i]);
        }
        vc4_reset_cl(&vc4->bcl);
        vc4_reset_cl(&vc4->rcl);
        vc4_reset_cl(&vc4->shader_rec);
        vc4_reset_cl(&vc4->uniforms);
        vc4_reset_cl(&vc4->bo_handles);
        vc4_reset_cl(&vc4->bo_pointers);
        vc4->shader_rec_count = 0;

        vc4->needs_flush = false;
        vc4->draw_call_queued = false;

        /* We have no hardware context saved between our draw calls, so we
         * need to flag the next draw as needing all state emitted.  Emitting
         * all state at the start of our draws is also what ensures that we
         * return to the state we need after a previous tile has finished.
         */
        vc4->dirty = ~0;
        vc4->resolve = 0;
        vc4->cleared = 0;

        vc4->draw_min_x = ~0;
        vc4->draw_min_y = ~0;
        vc4->draw_max_x = 0;
        vc4->draw_max_y = 0;
}

void
vc4_flush(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (!vc4->needs_flush)
                return;

        /* The RCL setup would choke if the draw bounds cause no drawing, so
         * just drop the drawing if that's the case.
         */
        if (vc4->draw_max_x <= vc4->draw_min_x ||
            vc4->draw_max_y <= vc4->draw_min_y) {
                vc4_draw_reset(vc4);
                return;
        }

        /* Increment the semaphore indicating that binning is done and
         * unblocking the render thread.  Note that this doesn't act until the
         * FLUSH completes.
         */
        cl_u8(&vc4->bcl, VC4_PACKET_INCREMENT_SEMAPHORE);
        /* The FLUSH caps all of our bin lists with a VC4_PACKET_RETURN. */
        cl_u8(&vc4->bcl, VC4_PACKET_FLUSH);

        vc4_setup_rcl(vc4);

        if (vc4_debug & VC4_DEBUG_CL) {
                fprintf(stderr, "BCL:\n");
                vc4_dump_cl(vc4->bcl.base, vc4->bcl.next - vc4->bcl.base, false);
                fprintf(stderr, "RCL:\n");
                vc4_dump_cl(vc4->rcl.base, vc4->rcl.next - vc4->rcl.base, true);
        }

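        /* Package the command lists, shader records, uniforms, and BO handle
         * list for submission: each is handed to the kernel (or simulator)
         * as a user-space pointer plus its size in bytes.
         */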
        struct drm_vc4_submit_cl submit;
        memset(&submit, 0, sizeof(submit));

        submit.bo_handles = (uintptr_t)vc4->bo_handles.base;
        submit.bo_handle_count = (vc4->bo_handles.next -
                                  vc4->bo_handles.base) / 4;
        submit.bin_cl = (uintptr_t)vc4->bcl.base;
        submit.bin_cl_size = vc4->bcl.next - vc4->bcl.base;
        submit.render_cl = (uintptr_t)vc4->rcl.base;
        submit.render_cl_size = vc4->rcl.next - vc4->rcl.base;
        submit.shader_rec = (uintptr_t)vc4->shader_rec.base;
        submit.shader_rec_size = vc4->shader_rec.next - vc4->shader_rec.base;
        submit.shader_rec_count = vc4->shader_rec_count;
        submit.uniforms = (uintptr_t)vc4->uniforms.base;
        submit.uniforms_size = vc4->uniforms.next - vc4->uniforms.base;

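        /* Skip the actual submission when NORAST debugging is enabled.  In a
         * simulator build the submit goes through vc4_simulator_flush()
         * instead of the real DRM ioctl.
         */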
        if (!(vc4_debug & VC4_DEBUG_NORAST)) {
                int ret;

#ifndef USE_VC4_SIMULATOR
                ret = drmIoctl(vc4->fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
#else
                ret = vc4_simulator_flush(vc4, &submit);
#endif
                if (ret) {
                        fprintf(stderr, "VC4 submit failed\n");
                        abort();
                }
        }

        vc4->last_emit_seqno = submit.seqno;

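        /* Under VC4_DEBUG_ALWAYS_SYNC, block until this frame's rendering
         * has completed before returning, rather than letting it run
         * asynchronously.
         */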
        if (vc4_debug & VC4_DEBUG_ALWAYS_SYNC) {
                if (!vc4_wait_seqno(vc4->screen, vc4->last_emit_seqno,
                                    PIPE_TIMEOUT_INFINITE)) {
                        fprintf(stderr, "Wait failed.\n");
                        abort();
                }
        }

        vc4_draw_reset(vc4);
}

static void
vc4_pipe_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
               unsigned flags)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        vc4_flush(pctx);

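        /* If the caller asked for a fence, wrap the seqno of the most recent
         * submit in a vc4_fence.  If nothing needed flushing, this is still
         * the seqno of the last submitted frame.
         */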
        if (fence) {
                struct vc4_fence *f = vc4_fence_create(vc4->screen,
                                                       vc4->last_emit_seqno);
                *fence = (struct pipe_fence_handle *)f;
        }
}

/**
 * Returns whether the current command lists reference the given BO.
 *
 * This lets callers flush only when the BO is actually referenced by queued
 * rendering, avoiding unnecessary flushes.
 */
bool
vc4_cl_references_bo(struct pipe_context *pctx, struct vc4_bo *bo)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (!vc4->needs_flush)
                return false;

        /* Walk all the referenced BOs in the drawing command list to see if
         * they match.
         */
        struct vc4_bo **referenced_bos = vc4->bo_pointers.base;
        for (int i = 0; i < (vc4->bo_handles.next -
                             vc4->bo_handles.base) / 4; i++) {
                if (referenced_bos[i] == bo) {
                        return true;
                }
        }

        /* Also check for the Z/color buffers, since the references to those
         * are only added immediately before submit.
         */
        struct vc4_surface *csurf = vc4_surface(vc4->framebuffer.cbufs[0]);
        if (csurf) {
                struct vc4_resource *ctex = vc4_resource(csurf->base.texture);
                if (ctex->bo == bo) {
                        return true;
                }
        }

        struct vc4_surface *zsurf = vc4_surface(vc4->framebuffer.zsbuf);
        if (zsurf) {
                struct vc4_resource *ztex =
                        vc4_resource(zsurf->base.texture);
                if (ztex->bo == bo) {
                        return true;
                }
        }

        return false;
}

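/**
 * Handles the pipe invalidate_resource() hook.
 *
 * If the current depth/stencil buffer's contents are invalidated, there is
 * no need to store (resolve) them out at the end of the frame.
 */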
static void
vc4_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct pipe_surface *zsurf = vc4->framebuffer.zsbuf;

        if (zsurf && zsurf->texture == prsc)
                vc4->resolve &= ~(PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL);
}

static void
vc4_context_destroy(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (vc4->blitter)
                util_blitter_destroy(vc4->blitter);

        if (vc4->primconvert)
                util_primconvert_destroy(vc4->primconvert);

        util_slab_destroy(&vc4->transfer_pool);

        pipe_surface_reference(&vc4->framebuffer.cbufs[0], NULL);
        pipe_surface_reference(&vc4->framebuffer.zsbuf, NULL);
        vc4_bo_unreference(&vc4->tile_alloc);
        vc4_bo_unreference(&vc4->tile_state);

        vc4_program_fini(pctx);

        ralloc_free(vc4);
}

struct pipe_context *
vc4_context_create(struct pipe_screen *pscreen, void *priv)
{
        struct vc4_screen *screen = vc4_screen(pscreen);
        struct vc4_context *vc4;

        /* Prevent dumping of the shaders built during context setup. */
        uint32_t saved_shaderdb_flag = vc4_debug & VC4_DEBUG_SHADERDB;
        vc4_debug &= ~VC4_DEBUG_SHADERDB;

        vc4 = rzalloc(NULL, struct vc4_context);
        if (vc4 == NULL)
                return NULL;
        struct pipe_context *pctx = &vc4->base;

        vc4->screen = screen;

        pctx->screen = pscreen;
        pctx->priv = priv;
        pctx->destroy = vc4_context_destroy;
        pctx->flush = vc4_pipe_flush;
        pctx->invalidate_resource = vc4_invalidate_resource;

        vc4_draw_init(pctx);
        vc4_state_init(pctx);
        vc4_program_init(pctx);
        vc4_query_init(pctx);
        vc4_resource_context_init(pctx);

        vc4_init_cl(vc4, &vc4->bcl);
        vc4_init_cl(vc4, &vc4->rcl);
        vc4_init_cl(vc4, &vc4->shader_rec);
        vc4_init_cl(vc4, &vc4->uniforms);
        vc4_init_cl(vc4, &vc4->bo_handles);
        vc4_init_cl(vc4, &vc4->bo_pointers);
        vc4_draw_reset(vc4);

        vc4->fd = screen->fd;

        util_slab_create(&vc4->transfer_pool, sizeof(struct vc4_transfer),
                         16, UTIL_SLAB_SINGLETHREADED);
        vc4->blitter = util_blitter_create(pctx);
        if (!vc4->blitter)
                goto fail;

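        /* u_primconvert is told which primitive types we handle natively
         * (everything below PIPE_PRIM_QUADS); draws using other types get
         * converted before they reach the hardware.
         */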
        vc4->primconvert = util_primconvert_create(pctx,
                                                   (1 << PIPE_PRIM_QUADS) - 1);
        if (!vc4->primconvert)
                goto fail;

        vc4_debug |= saved_shaderdb_flag;

        return &vc4->base;

fail:
        pctx->destroy(pctx);
        return NULL;
}