i965/miptree: Prepare intel_miptree_copy() for isl based
mesa.git: src/mesa/drivers/dri/i965/intel_blit.c
/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/mtypes.h"
#include "main/blit.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/fbobject.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "intel_blit.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"

#define FILE_DEBUG_FLAG DEBUG_BLIT

static void
intel_miptree_set_alpha_to_one(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               int x, int y, int width, int height);

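/* These are the standard 8-bit BLT raster operation codes, where the source
 * operand is 0xCC and the destination operand is 0xAA; e.g. GL_XOR maps to
 * 0xCC ^ 0xAA = 0x66 and GL_AND to 0xCC & 0xAA = 0x88.
 */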
static GLuint translate_raster_op(GLenum logicop)
{
   switch (logicop) {
   case GL_CLEAR: return 0x00;
   case GL_AND: return 0x88;
   case GL_AND_REVERSE: return 0x44;
   case GL_COPY: return 0xCC;
   case GL_AND_INVERTED: return 0x22;
   case GL_NOOP: return 0xAA;
   case GL_XOR: return 0x66;
   case GL_OR: return 0xEE;
   case GL_NOR: return 0x11;
   case GL_EQUIV: return 0x99;
   case GL_INVERT: return 0x55;
   case GL_OR_REVERSE: return 0xDD;
   case GL_COPY_INVERTED: return 0x33;
   case GL_OR_INVERTED: return 0xBB;
   case GL_NAND: return 0x77;
   case GL_SET: return 0xFF;
   default: return 0;
   }
}

static uint32_t
br13_for_cpp(int cpp)
{
   switch (cpp) {
   case 16:
      return BR13_32323232;
   case 8:
      return BR13_16161616;
   case 4:
      return BR13_8888;
   case 2:
      return BR13_565;
   case 1:
      return BR13_8;
   default:
      unreachable("not reached");
   }
}

/**
 * Emits the packet for switching the blitter from X to Y tiled or back.
 *
 * This has to be called in a single BEGIN_BATCH_BLT_TILED() /
 * ADVANCE_BATCH_TILED().  This is because BCS_SWCTRL is saved and restored as
 * part of the power context, not a render context, and if the batchbuffer was
 * to get flushed between setting and blitting, or blitting and restoring, our
 * tiling state would leak into other unsuspecting applications (like the X
 * server).
 */
static uint32_t *
set_blitter_tiling(struct brw_context *brw,
                   bool dst_y_tiled, bool src_y_tiled,
                   uint32_t *__map)
{
   assert(brw->gen >= 6);

   /* Idle the blitter before we update how tiling is interpreted. */
   OUT_BATCH(MI_FLUSH_DW);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);

   OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
   OUT_BATCH(BCS_SWCTRL);
   OUT_BATCH((BCS_SWCTRL_DST_Y | BCS_SWCTRL_SRC_Y) << 16 |
             (dst_y_tiled ? BCS_SWCTRL_DST_Y : 0) |
             (src_y_tiled ? BCS_SWCTRL_SRC_Y : 0));
   return __map;
}
#define SET_BLITTER_TILING(...) __map = set_blitter_tiling(__VA_ARGS__, __map)

#define BEGIN_BATCH_BLT_TILED(n, dst_y_tiled, src_y_tiled)              \
      BEGIN_BATCH_BLT(n + ((dst_y_tiled || src_y_tiled) ? 14 : 0));     \
      if (dst_y_tiled || src_y_tiled)                                   \
         SET_BLITTER_TILING(brw, dst_y_tiled, src_y_tiled)

#define ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled)                   \
      if (dst_y_tiled || src_y_tiled)                                   \
         SET_BLITTER_TILING(brw, false, false);                         \
      ADVANCE_BATCH()

static int
blt_pitch(struct intel_mipmap_tree *mt)
{
   int pitch = mt->surf.row_pitch;
   if (mt->surf.tiling != ISL_TILING_LINEAR)
      pitch /= 4;
   return pitch;
}

bool
intel_miptree_blit_compatible_formats(mesa_format src, mesa_format dst)
{
   /* The BLT doesn't handle sRGB conversion */
   assert(src == _mesa_get_srgb_format_linear(src));
   assert(dst == _mesa_get_srgb_format_linear(dst));

   /* No swizzle or format conversions possible, except... */
   if (src == dst)
      return true;

   /* ...we can either discard the alpha channel when going from A->X,
    * or we can fill the alpha channel with 0xff when going from X->A
    */
   if (src == MESA_FORMAT_B8G8R8A8_UNORM || src == MESA_FORMAT_B8G8R8X8_UNORM)
      return (dst == MESA_FORMAT_B8G8R8A8_UNORM ||
              dst == MESA_FORMAT_B8G8R8X8_UNORM);

   if (src == MESA_FORMAT_R8G8B8A8_UNORM || src == MESA_FORMAT_R8G8B8X8_UNORM)
      return (dst == MESA_FORMAT_R8G8B8A8_UNORM ||
              dst == MESA_FORMAT_R8G8B8X8_UNORM);

   return false;
}

static void
get_blit_intratile_offset_el(const struct brw_context *brw,
                             struct intel_mipmap_tree *mt,
                             uint32_t total_x_offset_el,
                             uint32_t total_y_offset_el,
                             uint32_t *base_address_offset,
                             uint32_t *x_offset_el,
                             uint32_t *y_offset_el)
{
   enum isl_tiling tiling = intel_miptree_get_isl_tiling(mt);
   isl_tiling_get_intratile_offset_el(tiling, mt->cpp * 8, mt->surf.row_pitch,
                                      total_x_offset_el, total_y_offset_el,
                                      base_address_offset,
                                      x_offset_el, y_offset_el);
   if (tiling == ISL_TILING_LINEAR) {
      /* From the Broadwell PRM docs for XY_SRC_COPY_BLT::SourceBaseAddress:
       *
       *    "Base address of the destination surface: X=0, Y=0. Lower 32bits
       *    of the 48bit addressing. When Src Tiling is enabled (Bit_15
       *    enabled), this address must be 4KB-aligned. When Tiling is not
       *    enabled, this address should be CL (64byte) aligned."
       *
       * The offsets we get from ISL in the tiled case are already aligned.
       * In the linear case, we need to do some of our own aligning.
       */
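      /* Illustrative example: with cpp = 4 and a raw byte offset of 1000
       * from ISL, delta = 1000 & 63 = 40, so the base address drops to 960
       * and x_offset_el grows by 40 / 4 = 10 pixels.
       */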
      assert(mt->surf.row_pitch % 64 == 0);
      uint32_t delta = *base_address_offset & 63;
      assert(delta % mt->cpp == 0);
      *base_address_offset -= delta;
      *x_offset_el += delta / mt->cpp;
   } else {
      assert(*base_address_offset % 4096 == 0);
   }
}

static bool
emit_miptree_blit(struct brw_context *brw,
                  struct intel_mipmap_tree *src_mt,
                  uint32_t src_x, uint32_t src_y,
                  struct intel_mipmap_tree *dst_mt,
                  uint32_t dst_x, uint32_t dst_y,
                  uint32_t width, uint32_t height,
                  bool reverse, GLenum logicop)
{
   /* According to the Ivy Bridge PRM, Vol1 Part4, section 1.2.1.2 (Graphics
    * Data Size Limitations):
    *
    *    The BLT engine is capable of transferring very large quantities of
    *    graphics data.  Any graphics data read from and written to the
    *    destination is permitted to represent a number of pixels that
    *    occupies up to 65,536 scan lines and up to 32,768 bytes per scan
    *    line at the destination.  The maximum number of pixels that may be
    *    represented per scan line's worth of graphics data depends on the
    *    color depth.
    *
    * The blitter's pitch is a signed 16-bit integer, but measured in bytes
    * for linear surfaces and DWords for tiled surfaces.  So the maximum
    * pitch is 32k linear and 128k tiled.
    */
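   /* For instance, a tightly packed linear RGBA8 surface 8192 or more pixels
    * wide has a row pitch of at least 32768 bytes and has to take the
    * fallback path below.
    */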
   if (blt_pitch(src_mt) >= 32768 || blt_pitch(dst_mt) >= 32768) {
      perf_debug("Falling back due to >= 32k/128k pitch\n");
      return false;
   }

   /* We need to split the blit into chunks that each fit within the blitter's
    * restrictions.  We can't use a chunk size of 32768 because we need to
    * ensure that src_tile_x + chunk_size fits.  We choose 16384 because it's
    * a nice round power of two, big enough that performance won't suffer, and
    * small enough to guarantee everything fits.
    */
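   /* As a hypothetical example, a 40000 x 100 pixel blit is emitted as three
    * chunks along x, 16384 + 16384 + 7232 pixels wide, one per pass of the
    * loop below.
    */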
   const uint32_t max_chunk_size = 16384;

   for (uint32_t chunk_x = 0; chunk_x < width; chunk_x += max_chunk_size) {
      for (uint32_t chunk_y = 0; chunk_y < height; chunk_y += max_chunk_size) {
         const uint32_t chunk_w = MIN2(max_chunk_size, width - chunk_x);
         const uint32_t chunk_h = MIN2(max_chunk_size, height - chunk_y);

         uint32_t src_offset, src_tile_x, src_tile_y;
         get_blit_intratile_offset_el(brw, src_mt,
                                      src_x + chunk_x, src_y + chunk_y,
                                      &src_offset, &src_tile_x, &src_tile_y);

         uint32_t dst_offset, dst_tile_x, dst_tile_y;
         get_blit_intratile_offset_el(brw, dst_mt,
                                      dst_x + chunk_x, dst_y + chunk_y,
                                      &dst_offset, &dst_tile_x, &dst_tile_y);

         if (!intelEmitCopyBlit(brw,
                                src_mt->cpp,
                                reverse ? -src_mt->surf.row_pitch :
                                          src_mt->surf.row_pitch,
                                src_mt->bo, src_mt->offset + src_offset,
                                src_mt->surf.tiling,
                                dst_mt->surf.row_pitch,
                                dst_mt->bo, dst_mt->offset + dst_offset,
                                dst_mt->surf.tiling,
                                src_tile_x, src_tile_y,
                                dst_tile_x, dst_tile_y,
                                chunk_w, chunk_h,
                                logicop)) {
            /* If this is ever going to fail, it will fail on the first chunk */
            assert(chunk_x == 0 && chunk_y == 0);
            return false;
         }
      }
   }

   return true;
}

/**
 * Implements a rectangular block transfer (blit) of pixels between two
 * miptrees.
 *
 * Our blitter can operate on 1, 2, or 4-byte-per-pixel data, with generous,
 * but limited, pitches and sizes allowed.
 *
 * The src/dst coordinates are relative to the given level/slice of the
 * miptree.
 *
 * If @src_flip or @dst_flip is set, then the rectangle within that miptree
 * will be inverted (including scanline order) when copying.  This is common
 * in GL when copying between window system and user-created
 * renderbuffers/textures.
 */
bool
intel_miptree_blit(struct brw_context *brw,
                   struct intel_mipmap_tree *src_mt,
                   int src_level, int src_slice,
                   uint32_t src_x, uint32_t src_y, bool src_flip,
                   struct intel_mipmap_tree *dst_mt,
                   int dst_level, int dst_slice,
                   uint32_t dst_x, uint32_t dst_y, bool dst_flip,
                   uint32_t width, uint32_t height,
                   GLenum logicop)
{
   /* The blitter doesn't understand multisampling at all. */
   if (src_mt->surf.samples > 1 || dst_mt->surf.samples > 1)
      return false;

   /* No sRGB decode or encode is done by the hardware blitter, which is
    * consistent with what we want in many callers (glCopyTexSubImage(),
    * texture validation, etc.).
    */
   mesa_format src_format = _mesa_get_srgb_format_linear(src_mt->format);
   mesa_format dst_format = _mesa_get_srgb_format_linear(dst_mt->format);

   /* The blitter doesn't support any real format conversions.  The only
    * cases we allow are blitting ARGB8888 to XRGB8888 (trivial, since the
    * values dropped into the X channel don't matter), and XRGB8888 to
    * ARGB8888, which works by setting the A channel to 1.0 at the end.
    */
   if (!intel_miptree_blit_compatible_formats(src_format, dst_format)) {
      perf_debug("%s: Can't use hardware blitter from %s to %s, "
                 "falling back.\n", __func__,
                 _mesa_get_format_name(src_format),
                 _mesa_get_format_name(dst_format));
      return false;
   }

   /* The blitter has no idea about HiZ or fast color clears, so we need to
    * resolve the miptrees before we do anything.
    */
   intel_miptree_access_raw(brw, src_mt, src_level, src_slice, false);
   intel_miptree_access_raw(brw, dst_mt, dst_level, dst_slice, true);

   if (src_flip) {
      const unsigned h0 = src_mt->surf.size > 0 ?
         src_mt->surf.phys_level0_sa.height : src_mt->physical_height0;
      src_y = minify(h0, src_level - src_mt->first_level) - src_y - height;
   }

   if (dst_flip) {
      const unsigned h0 = dst_mt->surf.size > 0 ?
         dst_mt->surf.phys_level0_sa.height : dst_mt->physical_height0;
      dst_y = minify(h0, dst_level - dst_mt->first_level) - dst_y - height;
   }

   uint32_t src_image_x, src_image_y, dst_image_x, dst_image_y;
   intel_miptree_get_image_offset(src_mt, src_level, src_slice,
                                  &src_image_x, &src_image_y);
   intel_miptree_get_image_offset(dst_mt, dst_level, dst_slice,
                                  &dst_image_x, &dst_image_y);
   src_x += src_image_x;
   src_y += src_image_y;
   dst_x += dst_image_x;
   dst_y += dst_image_y;

   if (!emit_miptree_blit(brw, src_mt, src_x, src_y,
                          dst_mt, dst_x, dst_y, width, height,
                          src_flip != dst_flip, logicop)) {
      return false;
   }

   /* XXX This could be done in a single pass using XY_FULL_MONO_PATTERN_BLT */
   if (_mesa_get_format_bits(src_format, GL_ALPHA_BITS) == 0 &&
       _mesa_get_format_bits(dst_format, GL_ALPHA_BITS) > 0) {
      intel_miptree_set_alpha_to_one(brw, dst_mt,
                                     dst_x, dst_y,
                                     width, height);
   }

   return true;
}

bool
intel_miptree_copy(struct brw_context *brw,
                   struct intel_mipmap_tree *src_mt,
                   int src_level, int src_slice,
                   uint32_t src_x, uint32_t src_y,
                   struct intel_mipmap_tree *dst_mt,
                   int dst_level, int dst_slice,
                   uint32_t dst_x, uint32_t dst_y,
                   uint32_t src_width, uint32_t src_height)
{
   /* The blitter doesn't understand multisampling at all. */
   if (src_mt->surf.samples > 1 || dst_mt->surf.samples > 1)
      return false;

   if (src_mt->format == MESA_FORMAT_S_UINT8)
      return false;

   /* The blitter has no idea about HiZ or fast color clears, so we need to
    * resolve the miptrees before we do anything.
    */
   intel_miptree_access_raw(brw, src_mt, src_level, src_slice, false);
   intel_miptree_access_raw(brw, dst_mt, dst_level, dst_slice, true);

   uint32_t src_image_x, src_image_y;
   intel_miptree_get_image_offset(src_mt, src_level, src_slice,
                                  &src_image_x, &src_image_y);

   if (_mesa_is_format_compressed(src_mt->format)) {
      GLuint bw, bh;
      _mesa_get_format_block_size(src_mt->format, &bw, &bh);

      /* Compressed textures need not have dimensions that are a multiple of
       * the block size.  Rectangles in compressed textures do need to be a
       * multiple of the block size.  The one exception is that the right and
       * bottom edges may be at the right or bottom edge of the miplevel even
       * if it's not aligned.
       */
      assert(src_x % bw == 0);
      assert(src_y % bh == 0);

      if (src_mt->surf.size > 0) {
         assert(src_width % bw == 0 ||
                src_x + src_width ==
                minify(src_mt->surf.logical_level0_px.width, src_level));
         assert(src_height % bh == 0 ||
                src_y + src_height ==
                minify(src_mt->surf.logical_level0_px.height, src_level));
      } else {
         assert(src_width % bw == 0 ||
                src_x + src_width ==
                minify(src_mt->logical_width0, src_level));
         assert(src_height % bh == 0 ||
                src_y + src_height ==
                minify(src_mt->logical_height0, src_level));
      }

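      /* e.g. a 64 x 64 texel region at (16, 32) in a surface with 4x4
       * compression blocks becomes a 16 x 16 block region at (4, 8) below.
       */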
      src_x /= (int)bw;
      src_y /= (int)bh;
      src_width = DIV_ROUND_UP(src_width, (int)bw);
      src_height = DIV_ROUND_UP(src_height, (int)bh);
   }
   src_x += src_image_x;
   src_y += src_image_y;

   uint32_t dst_image_x, dst_image_y;
   intel_miptree_get_image_offset(dst_mt, dst_level, dst_slice,
                                  &dst_image_x, &dst_image_y);

   if (_mesa_is_format_compressed(dst_mt->format)) {
      GLuint bw, bh;
      _mesa_get_format_block_size(dst_mt->format, &bw, &bh);

      assert(dst_x % bw == 0);
      assert(dst_y % bh == 0);

      dst_x /= (int)bw;
      dst_y /= (int)bh;
   }
   dst_x += dst_image_x;
   dst_y += dst_image_y;

   return emit_miptree_blit(brw, src_mt, src_x, src_y,
                            dst_mt, dst_x, dst_y,
                            src_width, src_height, false, GL_COPY);
}

static bool
alignment_valid(struct brw_context *brw, unsigned offset,
                enum isl_tiling tiling)
{
   /* Tiled buffers must be page-aligned (4K). */
   if (tiling != ISL_TILING_LINEAR)
      return (offset & 4095) == 0;

   /* On Gen8+, linear buffers must be cacheline-aligned. */
   if (brw->gen >= 8)
      return (offset & 63) == 0;

   return true;
}

static uint32_t
xy_blit_cmd(enum isl_tiling src_tiling, enum isl_tiling dst_tiling,
            uint32_t cpp)
{
   uint32_t CMD = 0;

   assert(cpp <= 4);
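   /* The separate RGB and alpha write enables only apply to 32bpp blits;
    * 8bpp and 16bpp blits always write the whole pixel.
    */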
   switch (cpp) {
   case 1:
   case 2:
      CMD = XY_SRC_COPY_BLT_CMD;
      break;
   case 4:
      CMD = XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
      break;
   default:
      unreachable("not reached");
   }

   if (dst_tiling != ISL_TILING_LINEAR)
      CMD |= XY_DST_TILED;

   if (src_tiling != ISL_TILING_LINEAR)
      CMD |= XY_SRC_TILED;

   return CMD;
}

/* Copy BitBlt
 */
bool
intelEmitCopyBlit(struct brw_context *brw,
                  GLuint cpp,
                  int32_t src_pitch,
                  struct brw_bo *src_buffer,
                  GLuint src_offset,
                  enum isl_tiling src_tiling,
                  int32_t dst_pitch,
                  struct brw_bo *dst_buffer,
                  GLuint dst_offset,
                  enum isl_tiling dst_tiling,
                  GLshort src_x, GLshort src_y,
                  GLshort dst_x, GLshort dst_y,
                  GLshort w, GLshort h,
                  GLenum logic_op)
{
   GLuint CMD, BR13;
   int dst_y2 = dst_y + h;
   int dst_x2 = dst_x + w;
   bool dst_y_tiled = dst_tiling == ISL_TILING_Y0;
   bool src_y_tiled = src_tiling == ISL_TILING_Y0;
   uint32_t src_tile_w, src_tile_h;
   uint32_t dst_tile_w, dst_tile_h;

   if ((dst_y_tiled || src_y_tiled) && brw->gen < 6)
      return false;

   const unsigned bo_sizes = dst_buffer->size + src_buffer->size;

   /* do space check before going any further */
   if (!brw_batch_has_aperture_space(brw, bo_sizes))
      intel_batchbuffer_flush(brw);

   if (!brw_batch_has_aperture_space(brw, bo_sizes))
      return false;

   unsigned length = brw->gen >= 8 ? 10 : 8;

   intel_batchbuffer_require_space(brw, length * 4, BLT_RING);
   DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
       __func__,
       src_buffer, src_pitch, src_offset, src_x, src_y,
       dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);

   intel_get_tile_dims(src_tiling, cpp, &src_tile_w, &src_tile_h);
   intel_get_tile_dims(dst_tiling, cpp, &dst_tile_w, &dst_tile_h);

   /* For Tiled surfaces, the pitch has to be a multiple of the Tile width
    * (X direction width of the Tile).  This is ensured while allocating the
    * buffer object.
    */
   assert(src_tiling == ISL_TILING_LINEAR || (src_pitch % src_tile_w) == 0);
   assert(dst_tiling == ISL_TILING_LINEAR || (dst_pitch % dst_tile_w) == 0);

   /* For big formats (such as floating point), do the copy using 16 or
    * 32bpp and multiply the coordinates.
    */
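   /* e.g. a 16-byte pixel becomes a 32bpp blit with the x coordinates scaled
    * by 4; an 8-byte pixel becomes a 32bpp blit with x scaled by 2.
    */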
   if (cpp > 4) {
      if (cpp % 4 == 2) {
         dst_x *= cpp / 2;
         dst_x2 *= cpp / 2;
         src_x *= cpp / 2;
         cpp = 2;
      } else {
         assert(cpp % 4 == 0);
         dst_x *= cpp / 4;
         dst_x2 *= cpp / 4;
         src_x *= cpp / 4;
         cpp = 4;
      }
   }

   if (!alignment_valid(brw, dst_offset, dst_tiling))
      return false;
   if (!alignment_valid(brw, src_offset, src_tiling))
      return false;

   /* Blit pitch must be dword-aligned.  Otherwise, the hardware appears to
    * drop the low bits.  Offsets must be naturally aligned.
    */
   if (src_pitch % 4 != 0 || src_offset % cpp != 0 ||
       dst_pitch % 4 != 0 || dst_offset % cpp != 0)
      return false;

   assert(cpp <= 4);
   BR13 = br13_for_cpp(cpp) | translate_raster_op(logic_op) << 16;

   CMD = xy_blit_cmd(src_tiling, dst_tiling, cpp);

   /* For tiled source and destination, pitch value should be specified
    * as a number of Dwords.
    */
   if (dst_tiling != ISL_TILING_LINEAR)
      dst_pitch /= 4;

   if (src_tiling != ISL_TILING_LINEAR)
      src_pitch /= 4;

   if (dst_y2 <= dst_y || dst_x2 <= dst_x)
      return true;

   assert(dst_x < dst_x2);
   assert(dst_y < dst_y2);

   BEGIN_BATCH_BLT_TILED(length, dst_y_tiled, src_y_tiled);
   OUT_BATCH(CMD | (length - 2));
   OUT_BATCH(BR13 | (uint16_t)dst_pitch);
   OUT_BATCH(SET_FIELD(dst_y, BLT_Y) | SET_FIELD(dst_x, BLT_X));
   OUT_BATCH(SET_FIELD(dst_y2, BLT_Y) | SET_FIELD(dst_x2, BLT_X));
   if (brw->gen >= 8) {
      OUT_RELOC64(dst_buffer,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                  dst_offset);
   } else {
      OUT_RELOC(dst_buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                dst_offset);
   }
   OUT_BATCH(SET_FIELD(src_y, BLT_Y) | SET_FIELD(src_x, BLT_X));
   OUT_BATCH((uint16_t)src_pitch);
   if (brw->gen >= 8) {
      OUT_RELOC64(src_buffer,
                  I915_GEM_DOMAIN_RENDER, 0,
                  src_offset);
   } else {
      OUT_RELOC(src_buffer,
                I915_GEM_DOMAIN_RENDER, 0,
                src_offset);
   }

   ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled);

   brw_emit_mi_flush(brw);

   return true;
}

bool
intelEmitImmediateColorExpandBlit(struct brw_context *brw,
                                  GLuint cpp,
                                  GLubyte *src_bits, GLuint src_size,
                                  GLuint fg_color,
                                  GLshort dst_pitch,
                                  struct brw_bo *dst_buffer,
                                  GLuint dst_offset,
                                  enum isl_tiling dst_tiling,
                                  GLshort x, GLshort y,
                                  GLshort w, GLshort h,
                                  GLenum logic_op)
{
   int dwords = ALIGN(src_size, 8) / 4;
   uint32_t opcode, br13, blit_cmd;

   if (dst_tiling != ISL_TILING_LINEAR) {
      if (dst_offset & 4095)
         return false;
      if (dst_tiling == ISL_TILING_Y0)
         return false;
   }

   assert((logic_op >= GL_CLEAR) && (logic_op <= (GL_CLEAR + 0x0f)));
   assert(dst_pitch > 0);

   if (w < 0 || h < 0)
      return true;

   DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d, %d bytes %d dwords\n",
       __func__,
       dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);

   unsigned xy_setup_blt_length = brw->gen >= 8 ? 10 : 8;
   intel_batchbuffer_require_space(brw, (xy_setup_blt_length * 4) +
                                        (3 * 4) + dwords * 4, BLT_RING);

   opcode = XY_SETUP_BLT_CMD;
   if (cpp == 4)
      opcode |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
   if (dst_tiling != ISL_TILING_LINEAR) {
      opcode |= XY_DST_TILED;
      dst_pitch /= 4;
   }

   br13 = dst_pitch | (translate_raster_op(logic_op) << 16) | (1 << 29);
   br13 |= br13_for_cpp(cpp);

   blit_cmd = XY_TEXT_IMMEDIATE_BLIT_CMD | XY_TEXT_BYTE_PACKED; /* packing? */
   if (dst_tiling != ISL_TILING_LINEAR)
      blit_cmd |= XY_DST_TILED;

   BEGIN_BATCH_BLT(xy_setup_blt_length + 3);
   OUT_BATCH(opcode | (xy_setup_blt_length - 2));
   OUT_BATCH(br13);
   OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
   OUT_BATCH((100 << 16) | 100); /* clip x2, y2 */
   if (brw->gen >= 8) {
      OUT_RELOC64(dst_buffer,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                  dst_offset);
   } else {
      OUT_RELOC(dst_buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                dst_offset);
   }
   OUT_BATCH(0); /* bg */
   OUT_BATCH(fg_color); /* fg */
   OUT_BATCH(0); /* pattern base addr */
   if (brw->gen >= 8)
      OUT_BATCH(0);

   OUT_BATCH(blit_cmd | ((3 - 2) + dwords));
   OUT_BATCH(SET_FIELD(y, BLT_Y) | SET_FIELD(x, BLT_X));
   OUT_BATCH(SET_FIELD(y + h, BLT_Y) | SET_FIELD(x + w, BLT_X));
   ADVANCE_BATCH();

   intel_batchbuffer_data(brw, src_bits, dwords * 4, BLT_RING);

   brw_emit_mi_flush(brw);

   return true;
}

/* We don't have a memmove-type blit like some other hardware, so we'll do a
 * rectangular blit covering a large space, then emit a 1-scanline blit at
 * the end to cover any remainder.
 */
void
intel_emit_linear_blit(struct brw_context *brw,
                       struct brw_bo *dst_bo,
                       unsigned int dst_offset,
                       struct brw_bo *src_bo,
                       unsigned int src_offset,
                       unsigned int size)
{
   struct gl_context *ctx = &brw->ctx;
   GLuint pitch, height;
   int16_t src_x, dst_x;
   bool ok;

   do {
      /* The pitch given to the GPU must be DWORD aligned, and we want the
       * width to match the pitch.  The maximum width is (1 << 15) - 1;
       * rounding that down to a DWORD boundary gives (1 << 15) - 4, and we
       * back off further to (1 << 15) - 64 so that the sub-cacheline x
       * offsets below still fit (see the asserts).
       */
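      /* Worked example (hypothetical size): a 100000-byte copy is emitted as
       * a 32704 x 3 blit (98112 bytes) on the first pass, then a 1888 x 1
       * blit for the remainder on the second pass.
       */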
      pitch = ROUND_DOWN_TO(MIN2(size, (1 << 15) - 64), 4);
      height = (size < pitch || pitch == 0) ? 1 : size / pitch;

      src_x = src_offset % 64;
      dst_x = dst_offset % 64;
      pitch = ALIGN(MIN2(size, (1 << 15) - 64), 4);
      assert(src_x + pitch < 1 << 15);
      assert(dst_x + pitch < 1 << 15);

      ok = intelEmitCopyBlit(brw, 1,
                             pitch, src_bo, src_offset - src_x,
                             ISL_TILING_LINEAR,
                             pitch, dst_bo, dst_offset - dst_x,
                             ISL_TILING_LINEAR,
                             src_x, 0, /* src x/y */
                             dst_x, 0, /* dst x/y */
                             MIN2(size, pitch), height, /* w, h */
                             GL_COPY);
      if (!ok) {
         _mesa_problem(ctx, "Failed to linear blit %dx%d\n",
                       MIN2(size, pitch), height);
         return;
      }

      pitch *= height;
      if (size <= pitch)
         return;

      src_offset += pitch;
      dst_offset += pitch;
      size -= pitch;
   } while (1);
}

/**
 * Used to initialize the alpha value of an ARGB8888 miptree after copying
 * into it from an XRGB8888 source.
 *
 * This is very common with glCopyTexImage2D().  Note that the coordinates are
 * relative to the start of the miptree, not relative to a slice within the
 * miptree.
 */
static void
intel_miptree_set_alpha_to_one(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               int x, int y, int width, int height)
{
   uint32_t BR13, CMD;
   int pitch, cpp;

   pitch = mt->surf.row_pitch;
   cpp = mt->cpp;

   DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n",
       __func__, mt->bo, pitch, x, y, width, height);

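   /* ROP 0xf0 is the solid-pattern (fill) operation; combined with only
    * XY_BLT_WRITE_ALPHA below, the 0xffffffff fill color is written to the
    * alpha channel alone.
    */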
   BR13 = br13_for_cpp(cpp) | 0xf0 << 16;
   CMD = XY_COLOR_BLT_CMD;
   CMD |= XY_BLT_WRITE_ALPHA;

   if (mt->surf.tiling != ISL_TILING_LINEAR) {
      CMD |= XY_DST_TILED;
      pitch /= 4;
   }
   BR13 |= pitch;

   /* do space check before going any further */
   if (!brw_batch_has_aperture_space(brw, mt->bo->size))
      intel_batchbuffer_flush(brw);

   unsigned length = brw->gen >= 8 ? 7 : 6;
   const bool dst_y_tiled = mt->surf.tiling == ISL_TILING_Y0;

   /* We need to split the blit into chunks that each fit within the blitter's
    * restrictions.  We can't use a chunk size of 32768 because we need to
    * ensure that src_tile_x + chunk_size fits.  We choose 16384 because it's
    * a nice round power of two, big enough that performance won't suffer, and
    * small enough to guarantee everything fits.
    */
   const uint32_t max_chunk_size = 16384;

   for (uint32_t chunk_x = 0; chunk_x < width; chunk_x += max_chunk_size) {
      for (uint32_t chunk_y = 0; chunk_y < height; chunk_y += max_chunk_size) {
         const uint32_t chunk_w = MIN2(max_chunk_size, width - chunk_x);
         const uint32_t chunk_h = MIN2(max_chunk_size, height - chunk_y);

         uint32_t offset, tile_x, tile_y;
         get_blit_intratile_offset_el(brw, mt,
                                      x + chunk_x, y + chunk_y,
                                      &offset, &tile_x, &tile_y);

         BEGIN_BATCH_BLT_TILED(length, dst_y_tiled, false);
         OUT_BATCH(CMD | (length - 2));
         OUT_BATCH(BR13);
         OUT_BATCH(SET_FIELD(y + chunk_y, BLT_Y) |
                   SET_FIELD(x + chunk_x, BLT_X));
         OUT_BATCH(SET_FIELD(y + chunk_y + chunk_h, BLT_Y) |
                   SET_FIELD(x + chunk_x + chunk_w, BLT_X));
         if (brw->gen >= 8) {
            OUT_RELOC64(mt->bo,
                        I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                        offset);
         } else {
            OUT_RELOC(mt->bo,
                      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                      offset);
         }
         OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
         ADVANCE_BATCH_TILED(dst_y_tiled, false);
      }
   }

   brw_emit_mi_flush(brw);
}