broadcom/vc5: Fix UIF surface size setup for ARB_fbo's mismatched sizes.
mesa.git: src/gallium/drivers/vc5/vc5_rcl.c
/*
 * Copyright © 2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_format.h"
#include "vc5_context.h"
#include "vc5_tiling.h"
#include "broadcom/cle/v3d_packet_v33_pack.h"

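/* load_raw()/store_raw() emit general tile buffer loads/stores in raw mode:
 * the tile buffer's internal (per-sample) contents are moved as-is, with no
 * format conversion or MSAA resolve.  They're used below for multisampled
 * surfaces.
 */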
static void
load_raw(struct vc5_cl *cl, struct pipe_surface *psurf, int buffer)
{
        struct vc5_surface *surf = vc5_surface(psurf);
        struct vc5_resource *rsc = vc5_resource(psurf->texture);

        cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
                load.raw_mode = true;
                load.buffer_to_load = buffer;
                load.address = cl_address(rsc->bo, surf->offset);
                load.padded_height_of_output_image_in_uif_blocks =
                        surf->padded_height_of_output_image_in_uif_blocks;
        }
}

static void
store_raw(struct vc5_cl *cl, struct pipe_surface *psurf, int buffer,
          bool color_clear, bool z_clear, bool s_clear)
{
        struct vc5_surface *surf = vc5_surface(psurf);
        struct vc5_resource *rsc = vc5_resource(psurf->texture);

        cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
                store.raw_mode = true;
                store.buffer_to_store = buffer;
                store.address = cl_address(rsc->bo, surf->offset);
                store.disable_colour_buffers_clear_on_write = !color_clear;
                store.disable_z_buffer_clear_on_write = !z_clear;
                store.disable_stencil_buffer_clear_on_write = !s_clear;
                store.padded_height_of_output_image_in_uif_blocks =
                        surf->padded_height_of_output_image_in_uif_blocks;
        }
}

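/* Translates the PIPE_CLEAR_DEPTH/STENCIL bits into the hardware's Z/S
 * buffer selector for tile buffer loads and stores.
 */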
static int
zs_buffer_from_pipe_bits(int pipe_clear_bits)
{
        switch (pipe_clear_bits & PIPE_CLEAR_DEPTHSTENCIL) {
        case PIPE_CLEAR_DEPTHSTENCIL:
                return ZSTENCIL;
        case PIPE_CLEAR_DEPTH:
                return Z;
        case PIPE_CLEAR_STENCIL:
                return STENCIL;
        default:
                return NONE;
        }
}

/* The HW queues up the load until the tile coordinates show up, but can only
 * track one at a time.  If we need to do more than one load, then we need to
 * flush out the previous load by emitting the tile coordinates and doing a
 * dummy store.
 */
static void
flush_last_load(struct vc5_cl *cl)
{
        cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
        cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
                store.buffer_to_store = NONE;
        }
}

static void
vc5_rcl_emit_generic_per_tile_list(struct vc5_job *job, int last_cbuf)
{
        /* Emit the generic list in our indirect state -- the rcl will just
         * have pointers into it.
         */
        struct vc5_cl *cl = &job->indirect;
        vc5_cl_ensure_space(cl, 200, 1);
        struct vc5_cl_reloc tile_list_start = cl_get_address(cl);

        const uint32_t pipe_clear_color_buffers = (PIPE_CLEAR_COLOR0 |
                                                   PIPE_CLEAR_COLOR1 |
                                                   PIPE_CLEAR_COLOR2 |
                                                   PIPE_CLEAR_COLOR3);
        const uint32_t first_color_buffer_bit = (ffs(PIPE_CLEAR_COLOR0) - 1);

        uint32_t read_but_not_cleared = job->resolve & ~job->cleared;

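        /* Reload any multisampled color buffers whose previous contents are
         * needed and weren't fully cleared.  MSAA surfaces are stored in raw
         * (per-sample) form below, so they have to be loaded raw as well.
         */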
        for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
                uint32_t bit = PIPE_CLEAR_COLOR0 << i;
                if (!(read_but_not_cleared & bit))
                        continue;

                struct pipe_surface *psurf = job->cbufs[i];
                if (!psurf || psurf->texture->nr_samples <= 1)
                        continue;

                load_raw(cl, psurf, RENDER_TARGET_0 + i);
                read_but_not_cleared &= ~bit;

                if (read_but_not_cleared)
                        flush_last_load(cl);
        }

        if (job->zsbuf && job->zsbuf->texture->nr_samples > 1 &&
            read_but_not_cleared & PIPE_CLEAR_DEPTHSTENCIL) {
                load_raw(cl, job->zsbuf,
                         zs_buffer_from_pipe_bits(read_but_not_cleared));
                read_but_not_cleared &= ~PIPE_CLEAR_DEPTHSTENCIL;
                if (read_but_not_cleared)
                        cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
        }

        /* The initial reload will be queued until we get the
         * tile coordinates.
         */
        if (read_but_not_cleared) {
                cl_emit(cl, RELOAD_TILE_COLOUR_BUFFER, load) {
                        load.disable_colour_buffer_load =
                                (~read_but_not_cleared & pipe_clear_color_buffers) >>
                                first_color_buffer_bit;
                        load.enable_z_load =
                                read_but_not_cleared & PIPE_CLEAR_DEPTH;
                        load.enable_stencil_load =
                                read_but_not_cleared & PIPE_CLEAR_STENCIL;
                }
        }

        /* Tile Coordinates triggers the reload and sets where the stores
         * go.  There must be one per store packet.
         */
        cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);

        cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);

        bool needs_color_clear = job->cleared & pipe_clear_color_buffers;
        bool needs_z_clear = job->cleared & PIPE_CLEAR_DEPTH;
        bool needs_s_clear = job->cleared & PIPE_CLEAR_STENCIL;
        /* Note that only the color RT being stored will be cleared by a
         * STORE_GENERAL, or all of them if the buffer is NONE.
         */
        bool msaa_color_clear = (needs_color_clear &&
                                 (job->cleared & pipe_clear_color_buffers) ==
                                 (job->resolve & pipe_clear_color_buffers));

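        /* stores_pending tracks which buffers still need to be written out.
         * The clear-on-write flags are only enabled on whichever store ends
         * up being emitted last, so the tile buffer clear happens exactly
         * once per tile.
         */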
        uint32_t stores_pending = job->resolve;

        /* Use raw stores for any MSAA surfaces.  These output UIF tiled
         * images where each 4x MSAA pixel is a 2x2 quad, and the format will
         * be that of the internal_type/internal_bpp, rather than the format
         * from GL's perspective.
         */
        for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
                uint32_t bit = PIPE_CLEAR_COLOR0 << i;
                if (!(job->resolve & bit))
                        continue;

                struct pipe_surface *psurf = job->cbufs[i];
                if (!psurf || psurf->texture->nr_samples <= 1)
                        continue;

                stores_pending &= ~bit;
                store_raw(cl, psurf, RENDER_TARGET_0 + i,
                          !stores_pending && msaa_color_clear,
                          !stores_pending && needs_z_clear,
                          !stores_pending && needs_s_clear);

                if (stores_pending)
                        cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
        }

        if (job->resolve & PIPE_CLEAR_DEPTHSTENCIL && job->zsbuf &&
            job->zsbuf->texture->nr_samples > 1) {
                stores_pending &= ~PIPE_CLEAR_DEPTHSTENCIL;
                store_raw(cl, job->zsbuf,
                          zs_buffer_from_pipe_bits(job->resolve),
                          false,
                          !stores_pending && needs_z_clear,
                          !stores_pending && needs_s_clear);

                if (stores_pending)
                        cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
        }

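        /* Store any remaining buffers (single-sampled color, and Z/S that
         * wasn't raw-stored above) with the resolved store, which also
         * carries any tile buffer clears not yet handled.  If nothing is
         * left to store but a color clear still needs to happen, fall back
         * to a dummy store that just performs the clear.
         */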
        if (stores_pending) {
                cl_emit(cl, STORE_MULTI_SAMPLE_RESOLVED_TILE_COLOR_BUFFER_EXTENDED, store) {

                        store.disable_color_buffer_write =
                                (~stores_pending >> first_color_buffer_bit) & 0xf;
                        store.enable_z_write = stores_pending & PIPE_CLEAR_DEPTH;
                        store.enable_stencil_write = stores_pending & PIPE_CLEAR_STENCIL;

                        /* Note that when set this will clear all of the color
                         * buffers.
                         */
                        store.disable_colour_buffers_clear_on_write =
                                !needs_color_clear;
                        store.disable_z_buffer_clear_on_write =
                                !needs_z_clear;
                        store.disable_stencil_buffer_clear_on_write =
                                !needs_s_clear;
                };
        } else if (needs_color_clear && !msaa_color_clear) {
                /* If we had MSAA color stores that didn't match the set of
                 * MSAA color clears, then we need to clear the color buffers
                 * now.
                 */
                cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
                        store.buffer_to_store = NONE;
                }
        }

        cl_emit(cl, RETURN_FROM_SUB_LIST, ret);

        cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
                branch.start = tile_list_start;
                branch.end = cl_get_address(cl);
        }
}

#define div_round_up(a, b) (((a) + (b) - 1) / (b))

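/* Builds the hardware render control list (RCL) for the job: the overall
 * rendering mode configuration, per-render-target and Z/S setup, the shared
 * per-tile load/store list, and finally the stream of supertile coordinates
 * that the hardware walks to render the frame.
 */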
void
vc5_emit_rcl(struct vc5_job *job)
{
        /* The RCL list should be empty. */
        assert(!job->rcl.bo);

        vc5_cl_ensure_space_with_branch(&job->rcl, 200 + 256 *
                                        cl_packet_length(SUPERTILE_COORDINATES));
        job->submit.rcl_start = job->rcl.bo->offset;
        vc5_job_add_bo(job, job->rcl.bo);

        int nr_cbufs = 0;
        for (int i = 0; i < VC5_MAX_DRAW_BUFFERS; i++) {
                if (job->cbufs[i])
                        nr_cbufs = i + 1;
        }

        /* Common config must be the first TILE_RENDERING_MODE_CONFIGURATION
         * and Z_STENCIL_CLEAR_VALUES must be last.  The ones in between are
         * optional updates to the previous HW state.
         */
        cl_emit(&job->rcl, TILE_RENDERING_MODE_CONFIGURATION_COMMON_CONFIGURATION,
                config) {
                config.enable_z_store = job->resolve & PIPE_CLEAR_DEPTH;
                config.enable_stencil_store = job->resolve & PIPE_CLEAR_STENCIL;

                config.early_z_disable = !job->uses_early_z;

                config.image_width_pixels = job->draw_width;
                config.image_height_pixels = job->draw_height;

                config.number_of_render_targets_minus_1 =
                        MAX2(nr_cbufs, 1) - 1;

                config.multisample_mode_4x = job->msaa;

                config.maximum_bpp_of_all_render_targets = job->internal_bpp;
        }

        for (int i = 0; i < nr_cbufs; i++) {
                struct pipe_surface *psurf = job->cbufs[i];
                if (!psurf)
                        continue;
                struct vc5_surface *surf = vc5_surface(psurf);
                struct vc5_resource *rsc = vc5_resource(psurf->texture);

                uint32_t config_pad = 0;
                uint32_t clear_pad = 0;

                /* XXX: Set the pad for raster. */
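                /* The pad field in the RT config below is tiny: differences
                 * of up to 14 UIF blocks from the frame's implicit padded
                 * height are encoded directly, while anything larger sets
                 * the pad to 15 and passes the full padded height through
                 * CLEAR_COLORS_PART3 instead (needed for ARB_fbo attachments
                 * whose sizes don't match the frame).
                 */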
                if (surf->tiling == VC5_TILING_UIF_NO_XOR ||
                    surf->tiling == VC5_TILING_UIF_XOR) {
                        int uif_block_height = vc5_utile_height(rsc->cpp) * 2;
                        uint32_t implicit_padded_height =
                                (align(job->draw_height, uif_block_height) /
                                 uif_block_height);
                        if (surf->padded_height_of_output_image_in_uif_blocks -
                            implicit_padded_height < 15) {
                                config_pad = (surf->padded_height_of_output_image_in_uif_blocks -
                                              implicit_padded_height);
                        } else {
                                config_pad = 15;
                                clear_pad = surf->padded_height_of_output_image_in_uif_blocks;
                        }
                }

                cl_emit(&job->rcl, TILE_RENDERING_MODE_CONFIGURATION_RENDER_TARGET_CONFIG, rt) {
                        rt.address = cl_address(rsc->bo, surf->offset);
                        rt.internal_type = surf->internal_type;
                        rt.output_image_format = surf->format;
                        rt.memory_format = surf->tiling;
                        rt.internal_bpp = surf->internal_bpp;
                        rt.render_target_number = i;
                        rt.pad = config_pad;

                        if (job->resolve & PIPE_CLEAR_COLOR0 << i)
                                rsc->writes++;
                }

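                /* The clear color for a render target is spread across up to
                 * three packets: PART1 holds the low 56 bits, with PART2 and
                 * PART3 supplying the rest for 64bpp and 128bpp internal
                 * formats.  PART3 also carries the UIF pad override computed
                 * above.
                 */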
                cl_emit(&job->rcl, TILE_RENDERING_MODE_CONFIGURATION_CLEAR_COLORS_PART1,
                        clear) {
                        clear.clear_color_low_32_bits = job->clear_color[i][0];
                        clear.clear_color_next_24_bits = job->clear_color[i][1] & 0xffffff;
                        clear.render_target_number = i;
                };

                if (surf->internal_bpp >= INTERNAL_BPP_64) {
                        cl_emit(&job->rcl, TILE_RENDERING_MODE_CONFIGURATION_CLEAR_COLORS_PART2,
                                clear) {
                                clear.clear_color_mid_low_32_bits =
                                        ((job->clear_color[i][1] >> 24) |
                                         (job->clear_color[i][2] << 8));
                                clear.clear_color_mid_high_24_bits =
                                        ((job->clear_color[i][2] >> 24) |
                                         ((job->clear_color[i][3] & 0xffff) << 8));
                                clear.render_target_number = i;
                        };
                }

                if (surf->internal_bpp >= INTERNAL_BPP_128 || clear_pad) {
                        cl_emit(&job->rcl, TILE_RENDERING_MODE_CONFIGURATION_CLEAR_COLORS_PART3,
                                clear) {
                                clear.uif_padded_height_in_uif_blocks = clear_pad;
                                clear.clear_color_high_16_bits = job->clear_color[i][3] >> 16;
                                clear.render_target_number = i;
                        };
                }
        }

        /* TODO: Don't bother emitting if we don't load/clear Z/S. */
        if (job->zsbuf) {
                struct pipe_surface *psurf = job->zsbuf;
                struct vc5_surface *surf = vc5_surface(psurf);
                struct vc5_resource *rsc = vc5_resource(psurf->texture);

                cl_emit(&job->rcl, TILE_RENDERING_MODE_CONFIGURATION_Z_STENCIL_CONFIG, zs) {
                        zs.address = cl_address(rsc->bo, surf->offset);

                        zs.internal_type = surf->internal_type;
                        zs.output_image_format = surf->format;
                        zs.padded_height_of_output_image_in_uif_blocks =
                                surf->padded_height_of_output_image_in_uif_blocks;

                        assert(surf->tiling != VC5_TILING_RASTER);
                        zs.memory_format = surf->tiling;
                }

                if (job->resolve & PIPE_CLEAR_DEPTHSTENCIL)
                        rsc->writes++;
        }

        /* Ends rendering mode config. */
        cl_emit(&job->rcl, TILE_RENDERING_MODE_CONFIGURATION_Z_STENCIL_CLEAR_VALUES,
                clear) {
                clear.z_clear_value = job->clear_z;
                clear.stencil_vg_mask_clear_value = job->clear_s;
        };

        /* Always set initial block size before the first branch, which needs
         * to match the value from binning mode config.
         */
        cl_emit(&job->rcl, TILE_LIST_INITIAL_BLOCK_SIZE, init) {
                init.use_auto_chained_tile_lists = true;
                init.size_of_first_block_in_chained_tile_lists =
                        TILE_ALLOCATION_BLOCK_SIZE_64B;
        }

        uint32_t supertile_w = 1, supertile_h = 1;

        /* If doing multicore binning, we would need to initialize each
         * core's tile list here.
         */
        cl_emit(&job->rcl, MULTICORE_RENDERING_TILE_LIST_SET_BASE, list) {
                list.address = cl_address(job->tile_alloc, 0);
        }

        cl_emit(&job->rcl, MULTICORE_RENDERING_SUPERTILE_CONFIGURATION, config) {
                uint32_t frame_w_in_supertiles, frame_h_in_supertiles;
                const uint32_t max_supertiles = 256;

                /* Size up our supertiles until we get under the limit. */
                for (;;) {
                        frame_w_in_supertiles = div_round_up(job->draw_tiles_x,
                                                             supertile_w);
                        frame_h_in_supertiles = div_round_up(job->draw_tiles_y,
                                                             supertile_h);
                        if (frame_w_in_supertiles * frame_h_in_supertiles <
                            max_supertiles) {
                                break;
                        }

                        if (supertile_w < supertile_h)
                                supertile_w++;
                        else
                                supertile_h++;
                }

                config.total_frame_width_in_tiles = job->draw_tiles_x;
                config.total_frame_height_in_tiles = job->draw_tiles_y;

                config.supertile_width_in_tiles_minus_1 = supertile_w - 1;
                config.supertile_height_in_tiles_minus_1 = supertile_h - 1;

                config.total_frame_width_in_supertiles = frame_w_in_supertiles;
                config.total_frame_height_in_supertiles = frame_h_in_supertiles;
        }

        /* Start by clearing the tile buffer. */
        cl_emit(&job->rcl, TILE_COORDINATES, coords) {
                coords.tile_column_number = 0;
                coords.tile_row_number = 0;
        }

        cl_emit(&job->rcl, STORE_TILE_BUFFER_GENERAL, store) {
                store.buffer_to_store = NONE;
        }

        cl_emit(&job->rcl, FLUSH_VCD_CACHE, flush);

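        /* Build the generic per-tile list (loads, branch to the binned
         * lists, stores) that the render control list points every tile at.
         */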
        vc5_rcl_emit_generic_per_tile_list(job, nr_cbufs - 1);

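        /* Wait for the binner to signal its semaphore before rendering, so
         * we don't start consuming tile lists that haven't been written yet.
         */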
        cl_emit(&job->rcl, WAIT_ON_SEMAPHORE, sem);

        /* XXX: Use Morton order */
        uint32_t supertile_w_in_pixels = job->tile_width * supertile_w;
        uint32_t supertile_h_in_pixels = job->tile_height * supertile_h;
        uint32_t min_x_supertile = job->draw_min_x / supertile_w_in_pixels;
        uint32_t min_y_supertile = job->draw_min_y / supertile_h_in_pixels;
        uint32_t max_x_supertile = (job->draw_max_x - 1) / supertile_w_in_pixels;
        uint32_t max_y_supertile = (job->draw_max_y - 1) / supertile_h_in_pixels;

        for (int y = min_y_supertile; y <= max_y_supertile; y++) {
                for (int x = min_x_supertile; x <= max_x_supertile; x++) {
                        cl_emit(&job->rcl, SUPERTILE_COORDINATES, coords) {
                                coords.column_number_in_supertiles = x;
                                coords.row_number_in_supertiles = y;
                        }
                }
        }

        cl_emit(&job->rcl, END_OF_RENDERING, end);
}