i965: Prepare blit engine for isl based miptrees
[mesa.git] / src/mesa/drivers/dri/i965/intel_blit.c
/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/mtypes.h"
#include "main/blit.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/fbobject.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "intel_blit.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"

#define FILE_DEBUG_FLAG DEBUG_BLIT

static void
intel_miptree_set_alpha_to_one(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               int x, int y, int width, int height);

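/* Translate a GL logic op into the blitter's 8-bit raster operation (ROP)
 * code.  The encoding treats the source as the bit pattern 0xCC and the
 * existing destination as 0xAA, so e.g. GL_COPY is simply 0xCC and GL_XOR is
 * 0xCC ^ 0xAA = 0x66.
 */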
static GLuint translate_raster_op(GLenum logicop)
{
   switch(logicop) {
   case GL_CLEAR: return 0x00;
   case GL_AND: return 0x88;
   case GL_AND_REVERSE: return 0x44;
   case GL_COPY: return 0xCC;
   case GL_AND_INVERTED: return 0x22;
   case GL_NOOP: return 0xAA;
   case GL_XOR: return 0x66;
   case GL_OR: return 0xEE;
   case GL_NOR: return 0x11;
   case GL_EQUIV: return 0x99;
   case GL_INVERT: return 0x55;
   case GL_OR_REVERSE: return 0xDD;
   case GL_COPY_INVERTED: return 0x33;
   case GL_OR_INVERTED: return 0xBB;
   case GL_NAND: return 0x77;
   case GL_SET: return 0xFF;
   default: return 0;
   }
}

static uint32_t
br13_for_cpp(int cpp)
{
   switch (cpp) {
   case 16:
      return BR13_32323232;
   case 8:
      return BR13_16161616;
   case 4:
      return BR13_8888;
   case 2:
      return BR13_565;
   case 1:
      return BR13_8;
   default:
      unreachable("not reached");
   }
}

/**
 * Emits the packet for switching the blitter from X to Y tiled or back.
 *
 * This has to happen within a single BEGIN_BATCH_BLT_TILED() /
 * ADVANCE_BATCH_TILED() pair: BCS_SWCTRL is saved and restored as part of the
 * power context, not the render context, so if the batchbuffer were flushed
 * between setting and blitting, or between blitting and restoring, our tiling
 * state would leak into other unsuspecting applications (like the X server).
 */
static uint32_t *
set_blitter_tiling(struct brw_context *brw,
                   bool dst_y_tiled, bool src_y_tiled,
                   uint32_t *__map)
{
   assert(brw->gen >= 6);

   /* Idle the blitter before we update how tiling is interpreted. */
   OUT_BATCH(MI_FLUSH_DW);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);

   OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
   OUT_BATCH(BCS_SWCTRL);
   OUT_BATCH((BCS_SWCTRL_DST_Y | BCS_SWCTRL_SRC_Y) << 16 |
             (dst_y_tiled ? BCS_SWCTRL_DST_Y : 0) |
             (src_y_tiled ? BCS_SWCTRL_SRC_Y : 0));
   return __map;
}
#define SET_BLITTER_TILING(...) __map = set_blitter_tiling(__VA_ARGS__, __map)

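/* Each SET_BLITTER_TILING() emits 7 dwords (4 for the MI_FLUSH_DW, 3 for the
 * MI_LOAD_REGISTER_IMM of BCS_SWCTRL), and a tiled blit needs one call to set
 * the state and one to restore it, hence the extra 14 dwords reserved below.
 */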
#define BEGIN_BATCH_BLT_TILED(n, dst_y_tiled, src_y_tiled) \
      BEGIN_BATCH_BLT(n + ((dst_y_tiled || src_y_tiled) ? 14 : 0)); \
      if (dst_y_tiled || src_y_tiled) \
         SET_BLITTER_TILING(brw, dst_y_tiled, src_y_tiled)

#define ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled) \
      if (dst_y_tiled || src_y_tiled) \
         SET_BLITTER_TILING(brw, false, false); \
      ADVANCE_BATCH()

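/* Return the pitch as the blitter interprets it: bytes for linear surfaces
 * and dwords for tiled surfaces.
 */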
static int
blt_pitch(struct intel_mipmap_tree *mt)
{
   int pitch = mt->surf.row_pitch;
   if (mt->surf.tiling != ISL_TILING_LINEAR)
      pitch /= 4;
   return pitch;
}

bool
intel_miptree_blit_compatible_formats(mesa_format src, mesa_format dst)
{
   /* The BLT doesn't handle sRGB conversion */
   assert(src == _mesa_get_srgb_format_linear(src));
   assert(dst == _mesa_get_srgb_format_linear(dst));

   /* No swizzle or format conversions possible, except... */
   if (src == dst)
      return true;

   /* ...we can either discard the alpha channel when going from A->X,
    * or we can fill the alpha channel with 0xff when going from X->A
    */
   if (src == MESA_FORMAT_B8G8R8A8_UNORM || src == MESA_FORMAT_B8G8R8X8_UNORM)
      return (dst == MESA_FORMAT_B8G8R8A8_UNORM ||
              dst == MESA_FORMAT_B8G8R8X8_UNORM);

   if (src == MESA_FORMAT_R8G8B8A8_UNORM || src == MESA_FORMAT_R8G8B8X8_UNORM)
      return (dst == MESA_FORMAT_R8G8B8A8_UNORM ||
              dst == MESA_FORMAT_R8G8B8X8_UNORM);

   return false;
}

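/* Resolve a total x/y offset (in elements) into a tile-aligned base address
 * offset plus small intratile x/y offsets that fit in the blit command's
 * coordinate fields.
 */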
static void
get_blit_intratile_offset_el(const struct brw_context *brw,
                             struct intel_mipmap_tree *mt,
                             uint32_t total_x_offset_el,
                             uint32_t total_y_offset_el,
                             uint32_t *base_address_offset,
                             uint32_t *x_offset_el,
                             uint32_t *y_offset_el)
{
   enum isl_tiling tiling = intel_miptree_get_isl_tiling(mt);
   isl_tiling_get_intratile_offset_el(tiling, mt->cpp * 8, mt->surf.row_pitch,
                                      total_x_offset_el, total_y_offset_el,
                                      base_address_offset,
                                      x_offset_el, y_offset_el);
   if (tiling == ISL_TILING_LINEAR) {
      /* From the Broadwell PRM docs for XY_SRC_COPY_BLT::SourceBaseAddress:
       *
       *    "Base address of the destination surface: X=0, Y=0. Lower 32bits
       *    of the 48bit addressing. When Src Tiling is enabled (Bit_15
       *    enabled), this address must be 4KB-aligned. When Tiling is not
       *    enabled, this address should be CL (64byte) aligned."
       *
       * The offsets we get from ISL in the tiled case are already aligned.
       * In the linear case, we need to do some of our own aligning.
       */
      assert(mt->surf.row_pitch % 64 == 0);
      uint32_t delta = *base_address_offset & 63;
      assert(delta % mt->cpp == 0);
      *base_address_offset -= delta;
      *x_offset_el += delta / mt->cpp;
   } else {
      assert(*base_address_offset % 4096 == 0);
   }
}

static bool
emit_miptree_blit(struct brw_context *brw,
                  struct intel_mipmap_tree *src_mt,
                  uint32_t src_x, uint32_t src_y,
                  struct intel_mipmap_tree *dst_mt,
                  uint32_t dst_x, uint32_t dst_y,
                  uint32_t width, uint32_t height,
                  bool reverse, GLenum logicop)
{
   /* According to the Ivy Bridge PRM, Vol1 Part4, section 1.2.1.2 (Graphics
    * Data Size Limitations):
    *
    *    The BLT engine is capable of transferring very large quantities of
    *    graphics data. Any graphics data read from and written to the
    *    destination is permitted to represent a number of pixels that
    *    occupies up to 65,536 scan lines and up to 32,768 bytes per scan line
    *    at the destination. The maximum number of pixels that may be
    *    represented per scan line's worth of graphics data depends on the
    *    color depth.
    *
    * The blitter's pitch is a signed 16-bit integer, but measured in bytes
    * for linear surfaces and DWords for tiled surfaces. So the maximum
    * pitch is 32k linear and 128k tiled.
    */
   if (blt_pitch(src_mt) >= 32768 || blt_pitch(dst_mt) >= 32768) {
      perf_debug("Falling back due to >= 32k/128k pitch\n");
      return false;
   }

   /* We need to split the blit into chunks that each fit within the blitter's
    * restrictions. We can't use a chunk size of 32768 because we need to
    * ensure that src_tile_x + chunk_size fits. We choose 16384 because it's
    * a nice round power of two, big enough that performance won't suffer, and
    * small enough to guarantee everything fits.
    */
   const uint32_t max_chunk_size = 16384;

   for (uint32_t chunk_x = 0; chunk_x < width; chunk_x += max_chunk_size) {
      for (uint32_t chunk_y = 0; chunk_y < height; chunk_y += max_chunk_size) {
         const uint32_t chunk_w = MIN2(max_chunk_size, width - chunk_x);
         const uint32_t chunk_h = MIN2(max_chunk_size, height - chunk_y);

         uint32_t src_offset, src_tile_x, src_tile_y;
         get_blit_intratile_offset_el(brw, src_mt,
                                      src_x + chunk_x, src_y + chunk_y,
                                      &src_offset, &src_tile_x, &src_tile_y);

         uint32_t dst_offset, dst_tile_x, dst_tile_y;
         get_blit_intratile_offset_el(brw, dst_mt,
                                      dst_x + chunk_x, dst_y + chunk_y,
                                      &dst_offset, &dst_tile_x, &dst_tile_y);

         if (!intelEmitCopyBlit(brw,
                                src_mt->cpp,
                                reverse ? -src_mt->surf.row_pitch :
                                          src_mt->surf.row_pitch,
                                src_mt->bo, src_mt->offset + src_offset,
                                src_mt->surf.tiling,
                                dst_mt->surf.row_pitch,
                                dst_mt->bo, dst_mt->offset + dst_offset,
                                dst_mt->surf.tiling,
                                src_tile_x, src_tile_y,
                                dst_tile_x, dst_tile_y,
                                chunk_w, chunk_h,
                                logicop)) {
            /* If this is ever going to fail, it will fail on the first chunk */
            assert(chunk_x == 0 && chunk_y == 0);
            return false;
         }
      }
   }

   return true;
}

/**
 * Implements a rectangular block transfer (blit) of pixels between two
 * miptrees.
 *
 * Our blitter can operate on 1, 2, or 4-byte-per-pixel data, with generous
 * but limited pitch and size restrictions.
 *
 * The src/dst coordinates are relative to the given level/slice of the
 * miptree.
 *
 * If @src_flip or @dst_flip is set, then the rectangle within that miptree
 * will be inverted (including scanline order) when copying. This is common
 * in GL when copying between window system and user-created
 * renderbuffers/textures.
 */
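/* A typical caller (e.g. a CopyTexSubImage path) looks roughly like the
 * sketch below; fall_back_to_blorp_or_cpu() is only a placeholder for
 * whatever slower path the caller has, not a real helper in this driver:
 *
 *    if (!intel_miptree_blit(brw, src_mt, src_level, src_slice,
 *                            src_x, src_y, false,
 *                            dst_mt, dst_level, dst_slice,
 *                            dst_x, dst_y, false,
 *                            width, height, GL_COPY))
 *       fall_back_to_blorp_or_cpu();
 */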
bool
intel_miptree_blit(struct brw_context *brw,
                   struct intel_mipmap_tree *src_mt,
                   int src_level, int src_slice,
                   uint32_t src_x, uint32_t src_y, bool src_flip,
                   struct intel_mipmap_tree *dst_mt,
                   int dst_level, int dst_slice,
                   uint32_t dst_x, uint32_t dst_y, bool dst_flip,
                   uint32_t width, uint32_t height,
                   GLenum logicop)
{
   /* The blitter doesn't understand multisampling at all. */
   if (src_mt->surf.samples > 1 || dst_mt->surf.samples > 1)
      return false;

   /* No sRGB decode or encode is done by the hardware blitter, which is
    * consistent with what we want in many callers (glCopyTexSubImage(),
    * texture validation, etc.).
    */
   mesa_format src_format = _mesa_get_srgb_format_linear(src_mt->format);
   mesa_format dst_format = _mesa_get_srgb_format_linear(dst_mt->format);

   /* The blitter doesn't support format conversions. We do, however, support
    * blitting ARGB8888 to XRGB8888 (trivial, since the values dropped into
    * the X channel don't matter), and XRGB8888 to ARGB8888 by setting the A
    * channel to 1.0 at the end.
    */
   if (!intel_miptree_blit_compatible_formats(src_format, dst_format)) {
      perf_debug("%s: Can't use hardware blitter from %s to %s, "
                 "falling back.\n", __func__,
                 _mesa_get_format_name(src_format),
                 _mesa_get_format_name(dst_format));
      return false;
   }

   /* The blitter has no idea about HiZ or fast color clears, so we need to
    * resolve the miptrees before we do anything.
    */
   intel_miptree_access_raw(brw, src_mt, src_level, src_slice, false);
   intel_miptree_access_raw(brw, dst_mt, dst_level, dst_slice, true);

   if (src_flip) {
      const unsigned h0 = src_mt->surf.size > 0 ?
         src_mt->surf.phys_level0_sa.height : src_mt->physical_height0;
      src_y = minify(h0, src_level - src_mt->first_level) - src_y - height;
   }

   if (dst_flip) {
      const unsigned h0 = dst_mt->surf.size > 0 ?
         dst_mt->surf.phys_level0_sa.height : dst_mt->physical_height0;
      dst_y = minify(h0, dst_level - dst_mt->first_level) - dst_y - height;
   }

   uint32_t src_image_x, src_image_y, dst_image_x, dst_image_y;
   intel_miptree_get_image_offset(src_mt, src_level, src_slice,
                                  &src_image_x, &src_image_y);
   intel_miptree_get_image_offset(dst_mt, dst_level, dst_slice,
                                  &dst_image_x, &dst_image_y);
   src_x += src_image_x;
   src_y += src_image_y;
   dst_x += dst_image_x;
   dst_y += dst_image_y;

   if (!emit_miptree_blit(brw, src_mt, src_x, src_y,
                          dst_mt, dst_x, dst_y, width, height,
                          src_flip != dst_flip, logicop)) {
      return false;
   }

   /* XXX This could be done in a single pass using XY_FULL_MONO_PATTERN_BLT */
   if (_mesa_get_format_bits(src_format, GL_ALPHA_BITS) == 0 &&
       _mesa_get_format_bits(dst_format, GL_ALPHA_BITS) > 0) {
      intel_miptree_set_alpha_to_one(brw, dst_mt,
                                     dst_x, dst_y,
                                     width, height);
   }

   return true;
}

bool
intel_miptree_copy(struct brw_context *brw,
                   struct intel_mipmap_tree *src_mt,
                   int src_level, int src_slice,
                   uint32_t src_x, uint32_t src_y,
                   struct intel_mipmap_tree *dst_mt,
                   int dst_level, int dst_slice,
                   uint32_t dst_x, uint32_t dst_y,
                   uint32_t src_width, uint32_t src_height)
{
   /* The blitter doesn't understand multisampling at all. */
   if (src_mt->surf.samples > 1 || dst_mt->surf.samples > 1)
      return false;

   if (src_mt->format == MESA_FORMAT_S_UINT8)
      return false;

   /* The blitter has no idea about HiZ or fast color clears, so we need to
    * resolve the miptrees before we do anything.
    */
   intel_miptree_access_raw(brw, src_mt, src_level, src_slice, false);
   intel_miptree_access_raw(brw, dst_mt, dst_level, dst_slice, true);

   uint32_t src_image_x, src_image_y;
   intel_miptree_get_image_offset(src_mt, src_level, src_slice,
                                  &src_image_x, &src_image_y);

   if (_mesa_is_format_compressed(src_mt->format)) {
      GLuint bw, bh;
      _mesa_get_format_block_size(src_mt->format, &bw, &bh);

      /* Compressed textures need not have dimensions that are a multiple of
       * the block size, but rectangles within them do, with one exception:
       * the right and bottom edges of the rectangle may land on the right or
       * bottom edge of the miplevel even if that edge is not block-aligned.
       */
      assert(src_x % bw == 0);
      assert(src_y % bh == 0);
      assert(src_width % bw == 0 ||
             src_x + src_width == minify(src_mt->logical_width0, src_level));
      assert(src_height % bh == 0 ||
             src_y + src_height == minify(src_mt->logical_height0, src_level));

      src_x /= (int)bw;
      src_y /= (int)bh;
      src_width = DIV_ROUND_UP(src_width, (int)bw);
      src_height = DIV_ROUND_UP(src_height, (int)bh);
   }
   src_x += src_image_x;
   src_y += src_image_y;

   uint32_t dst_image_x, dst_image_y;
   intel_miptree_get_image_offset(dst_mt, dst_level, dst_slice,
                                  &dst_image_x, &dst_image_y);

   if (_mesa_is_format_compressed(dst_mt->format)) {
      GLuint bw, bh;
      _mesa_get_format_block_size(dst_mt->format, &bw, &bh);

      assert(dst_x % bw == 0);
      assert(dst_y % bh == 0);

      dst_x /= (int)bw;
      dst_y /= (int)bh;
   }
   dst_x += dst_image_x;
   dst_y += dst_image_y;

   return emit_miptree_blit(brw, src_mt, src_x, src_y,
                            dst_mt, dst_x, dst_y,
                            src_width, src_height, false, GL_COPY);
}

static bool
alignment_valid(struct brw_context *brw, unsigned offset,
                enum isl_tiling tiling)
{
   /* Tiled buffers must be page-aligned (4K). */
   if (tiling != ISL_TILING_LINEAR)
      return (offset & 4095) == 0;

   /* On Gen8+, linear buffers must be cacheline-aligned. */
   if (brw->gen >= 8)
      return (offset & 63) == 0;

   return true;
}

static uint32_t
xy_blit_cmd(enum isl_tiling src_tiling, enum isl_tiling dst_tiling,
            uint32_t cpp)
{
   uint32_t CMD = 0;

   assert(cpp <= 4);
   switch (cpp) {
   case 1:
   case 2:
      CMD = XY_SRC_COPY_BLT_CMD;
      break;
   case 4:
      CMD = XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
      break;
   default:
      unreachable("not reached");
   }

   if (dst_tiling != ISL_TILING_LINEAR)
      CMD |= XY_DST_TILED;

   if (src_tiling != ISL_TILING_LINEAR)
      CMD |= XY_SRC_TILED;

   return CMD;
}

/* Copy BitBlt
 */
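/* Emit an XY_SRC_COPY_BLT between two buffer objects. Coordinates are in
 * pixels and pitches in bytes (converted to dwords internally for tiled
 * surfaces). Returns false if the blit cannot be performed with the given
 * parameters.
 */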
bool
intelEmitCopyBlit(struct brw_context *brw,
                  GLuint cpp,
                  int32_t src_pitch,
                  struct brw_bo *src_buffer,
                  GLuint src_offset,
                  enum isl_tiling src_tiling,
                  int32_t dst_pitch,
                  struct brw_bo *dst_buffer,
                  GLuint dst_offset,
                  enum isl_tiling dst_tiling,
                  GLshort src_x, GLshort src_y,
                  GLshort dst_x, GLshort dst_y,
                  GLshort w, GLshort h,
                  GLenum logic_op)
{
   GLuint CMD, BR13;
   int dst_y2 = dst_y + h;
   int dst_x2 = dst_x + w;
   bool dst_y_tiled = dst_tiling == ISL_TILING_Y0;
   bool src_y_tiled = src_tiling == ISL_TILING_Y0;
   uint32_t src_tile_w, src_tile_h;
   uint32_t dst_tile_w, dst_tile_h;

   if ((dst_y_tiled || src_y_tiled) && brw->gen < 6)
      return false;

   const unsigned bo_sizes = dst_buffer->size + src_buffer->size;

   /* do space check before going any further */
   if (!brw_batch_has_aperture_space(brw, bo_sizes))
      intel_batchbuffer_flush(brw);

   if (!brw_batch_has_aperture_space(brw, bo_sizes))
      return false;

   unsigned length = brw->gen >= 8 ? 10 : 8;

   intel_batchbuffer_require_space(brw, length * 4, BLT_RING);
   DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
       __func__,
       src_buffer, src_pitch, src_offset, src_x, src_y,
       dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);

   intel_get_tile_dims(src_tiling, cpp, &src_tile_w, &src_tile_h);
   intel_get_tile_dims(dst_tiling, cpp, &dst_tile_w, &dst_tile_h);

   /* For tiled surfaces, the pitch has to be a multiple of the tile width
    * (the X-direction width of the tile); this is ensured when the buffer
    * object is allocated.
    */
   assert(src_tiling == ISL_TILING_LINEAR || (src_pitch % src_tile_w) == 0);
   assert(dst_tiling == ISL_TILING_LINEAR || (dst_pitch % dst_tile_w) == 0);

   /* For big formats (such as floating point), do the copy using 16 or
    * 32bpp and multiply the coordinates.
    */
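   /* For example, a 16-byte RGBA32F texel is copied as four 32-bit pixels:
    * the x coordinates (and hence the effective width) are scaled by 4 and
    * cpp drops to 4.
    */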
   if (cpp > 4) {
      if (cpp % 4 == 2) {
         dst_x *= cpp / 2;
         dst_x2 *= cpp / 2;
         src_x *= cpp / 2;
         cpp = 2;
      } else {
         assert(cpp % 4 == 0);
         dst_x *= cpp / 4;
         dst_x2 *= cpp / 4;
         src_x *= cpp / 4;
         cpp = 4;
      }
   }

   if (!alignment_valid(brw, dst_offset, dst_tiling))
      return false;
   if (!alignment_valid(brw, src_offset, src_tiling))
      return false;

   /* Blit pitch must be dword-aligned. Otherwise, the hardware appears to drop
    * the low bits. Offsets must be naturally aligned.
    */
   if (src_pitch % 4 != 0 || src_offset % cpp != 0 ||
       dst_pitch % 4 != 0 || dst_offset % cpp != 0)
      return false;

   assert(cpp <= 4);
   BR13 = br13_for_cpp(cpp) | translate_raster_op(logic_op) << 16;

   CMD = xy_blit_cmd(src_tiling, dst_tiling, cpp);

   /* For tiled sources and destinations, the pitch must be specified in
    * dwords rather than bytes.
    */
   if (dst_tiling != ISL_TILING_LINEAR)
      dst_pitch /= 4;

   if (src_tiling != ISL_TILING_LINEAR)
      src_pitch /= 4;

   if (dst_y2 <= dst_y || dst_x2 <= dst_x)
      return true;

   assert(dst_x < dst_x2);
   assert(dst_y < dst_y2);

   BEGIN_BATCH_BLT_TILED(length, dst_y_tiled, src_y_tiled);
   OUT_BATCH(CMD | (length - 2));
   OUT_BATCH(BR13 | (uint16_t)dst_pitch);
   OUT_BATCH(SET_FIELD(dst_y, BLT_Y) | SET_FIELD(dst_x, BLT_X));
   OUT_BATCH(SET_FIELD(dst_y2, BLT_Y) | SET_FIELD(dst_x2, BLT_X));
   if (brw->gen >= 8) {
      OUT_RELOC64(dst_buffer,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                  dst_offset);
   } else {
      OUT_RELOC(dst_buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                dst_offset);
   }
   OUT_BATCH(SET_FIELD(src_y, BLT_Y) | SET_FIELD(src_x, BLT_X));
   OUT_BATCH((uint16_t)src_pitch);
   if (brw->gen >= 8) {
      OUT_RELOC64(src_buffer,
                  I915_GEM_DOMAIN_RENDER, 0,
                  src_offset);
   } else {
      OUT_RELOC(src_buffer,
                I915_GEM_DOMAIN_RENDER, 0,
                src_offset);
   }

   ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled);

   brw_emit_mi_flush(brw);

   return true;
}

bool
intelEmitImmediateColorExpandBlit(struct brw_context *brw,
                                  GLuint cpp,
                                  GLubyte *src_bits, GLuint src_size,
                                  GLuint fg_color,
                                  GLshort dst_pitch,
                                  struct brw_bo *dst_buffer,
                                  GLuint dst_offset,
                                  enum isl_tiling dst_tiling,
                                  GLshort x, GLshort y,
                                  GLshort w, GLshort h,
                                  GLenum logic_op)
{
   int dwords = ALIGN(src_size, 8) / 4;
   uint32_t opcode, br13, blit_cmd;

   if (dst_tiling != ISL_TILING_LINEAR) {
      if (dst_offset & 4095)
         return false;
      if (dst_tiling == ISL_TILING_Y0)
         return false;
   }

   assert((logic_op >= GL_CLEAR) && (logic_op <= (GL_CLEAR + 0x0f)));
   assert(dst_pitch > 0);

   if (w < 0 || h < 0)
      return true;

   DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d, %d bytes %d dwords\n",
       __func__,
       dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);

   unsigned xy_setup_blt_length = brw->gen >= 8 ? 10 : 8;
   intel_batchbuffer_require_space(brw, (xy_setup_blt_length * 4) +
                                        (3 * 4) + dwords * 4, BLT_RING);

   opcode = XY_SETUP_BLT_CMD;
   if (cpp == 4)
      opcode |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
   if (dst_tiling != ISL_TILING_LINEAR) {
      opcode |= XY_DST_TILED;
      dst_pitch /= 4;
   }

   br13 = dst_pitch | (translate_raster_op(logic_op) << 16) | (1 << 29);
   br13 |= br13_for_cpp(cpp);

   blit_cmd = XY_TEXT_IMMEDIATE_BLIT_CMD | XY_TEXT_BYTE_PACKED; /* packing? */
   if (dst_tiling != ISL_TILING_LINEAR)
      blit_cmd |= XY_DST_TILED;

   BEGIN_BATCH_BLT(xy_setup_blt_length + 3);
   OUT_BATCH(opcode | (xy_setup_blt_length - 2));
   OUT_BATCH(br13);
   OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
   OUT_BATCH((100 << 16) | 100); /* clip x2, y2 */
   if (brw->gen >= 8) {
      OUT_RELOC64(dst_buffer,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                  dst_offset);
   } else {
      OUT_RELOC(dst_buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                dst_offset);
   }
   OUT_BATCH(0); /* bg */
   OUT_BATCH(fg_color); /* fg */
   OUT_BATCH(0); /* pattern base addr */
   if (brw->gen >= 8)
      OUT_BATCH(0);

   OUT_BATCH(blit_cmd | ((3 - 2) + dwords));
   OUT_BATCH(SET_FIELD(y, BLT_Y) | SET_FIELD(x, BLT_X));
   OUT_BATCH(SET_FIELD(y + h, BLT_Y) | SET_FIELD(x + w, BLT_X));
   ADVANCE_BATCH();

   intel_batchbuffer_data(brw, src_bits, dwords * 4, BLT_RING);

   brw_emit_mi_flush(brw);

   return true;
}

/* We don't have a memmove-type blit like some other hardware, so we do the
 * bulk of the copy as one large rectangular blit and then loop, finishing
 * with a short (possibly single-scanline) blit that covers whatever remains.
 */
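/* For example (assuming 64-byte-aligned offsets), a 100000 byte copy becomes
 * a 32704x3 blit covering the first 98112 bytes, followed by an 1888x1 blit
 * for the remainder.
 */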
void
intel_emit_linear_blit(struct brw_context *brw,
                       struct brw_bo *dst_bo,
                       unsigned int dst_offset,
                       struct brw_bo *src_bo,
                       unsigned int src_offset,
                       unsigned int size)
{
   struct gl_context *ctx = &brw->ctx;
   GLuint pitch, height;
   int16_t src_x, dst_x;
   bool ok;

   do {
      /* The pitch given to the GPU must be DWORD aligned, and we want width
       * to match pitch.  The maximum coordinate is (1 << 15) - 1; since up to
       * 63 bytes of offset misalignment may be folded into the x coordinate
       * below, cap the pitch at (1 << 15) - 64.
       */
      pitch = ROUND_DOWN_TO(MIN2(size, (1 << 15) - 64), 4);
      height = (size < pitch || pitch == 0) ? 1 : size / pitch;

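      /* Base addresses handed to the blitter should be cacheline (64 byte)
       * aligned for linear surfaces, so fold any misalignment into the x
       * coordinate instead of the offset.
       */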
      src_x = src_offset % 64;
      dst_x = dst_offset % 64;
      pitch = ALIGN(MIN2(size, (1 << 15) - 64), 4);
      assert(src_x + pitch < 1 << 15);
      assert(dst_x + pitch < 1 << 15);

      ok = intelEmitCopyBlit(brw, 1,
                             pitch, src_bo, src_offset - src_x,
                             ISL_TILING_LINEAR,
                             pitch, dst_bo, dst_offset - dst_x,
                             ISL_TILING_LINEAR,
                             src_x, 0, /* src x/y */
                             dst_x, 0, /* dst x/y */
                             MIN2(size, pitch), height, /* w, h */
                             GL_COPY);
      if (!ok) {
         _mesa_problem(ctx, "Failed to linear blit %dx%d\n",
                       MIN2(size, pitch), height);
         return;
      }

      pitch *= height;
      if (size <= pitch)
         return;

      src_offset += pitch;
      dst_offset += pitch;
      size -= pitch;
   } while (1);
}

/**
 * Used to initialize the alpha value of an ARGB8888 miptree after copying
 * into it from an XRGB8888 source.
 *
 * This is very common with glCopyTexImage2D(). Note that the coordinates are
 * relative to the start of the miptree, not relative to a slice within the
 * miptree.
 */
static void
intel_miptree_set_alpha_to_one(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               int x, int y, int width, int height)
{
   uint32_t BR13, CMD;
   int pitch, cpp;

   pitch = mt->surf.row_pitch;
   cpp = mt->cpp;

   DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n",
       __func__, mt->bo, pitch, x, y, width, height);

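   /* 0xf0 is the PATCOPY raster op: write the solid fill color, ignoring both
    * the source and the existing destination.
    */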
   BR13 = br13_for_cpp(cpp) | 0xf0 << 16;
   CMD = XY_COLOR_BLT_CMD;
   CMD |= XY_BLT_WRITE_ALPHA;

   if (mt->surf.tiling != ISL_TILING_LINEAR) {
      CMD |= XY_DST_TILED;
      pitch /= 4;
   }
   BR13 |= pitch;

   /* do space check before going any further */
   if (!brw_batch_has_aperture_space(brw, mt->bo->size))
      intel_batchbuffer_flush(brw);

   unsigned length = brw->gen >= 8 ? 7 : 6;
   const bool dst_y_tiled = mt->surf.tiling == ISL_TILING_Y0;

   /* We need to split the blit into chunks that each fit within the blitter's
    * restrictions. We can't use a chunk size of 32768 because we need to
    * ensure that src_tile_x + chunk_size fits. We choose 16384 because it's
    * a nice round power of two, big enough that performance won't suffer, and
    * small enough to guarantee everything fits.
    */
   const uint32_t max_chunk_size = 16384;

   for (uint32_t chunk_x = 0; chunk_x < width; chunk_x += max_chunk_size) {
      for (uint32_t chunk_y = 0; chunk_y < height; chunk_y += max_chunk_size) {
         const uint32_t chunk_w = MIN2(max_chunk_size, width - chunk_x);
         const uint32_t chunk_h = MIN2(max_chunk_size, height - chunk_y);

         uint32_t offset, tile_x, tile_y;
         get_blit_intratile_offset_el(brw, mt,
                                      x + chunk_x, y + chunk_y,
                                      &offset, &tile_x, &tile_y);

         BEGIN_BATCH_BLT_TILED(length, dst_y_tiled, false);
         OUT_BATCH(CMD | (length - 2));
         OUT_BATCH(BR13);
         OUT_BATCH(SET_FIELD(y + chunk_y, BLT_Y) |
                   SET_FIELD(x + chunk_x, BLT_X));
         OUT_BATCH(SET_FIELD(y + chunk_y + chunk_h, BLT_Y) |
                   SET_FIELD(x + chunk_x + chunk_w, BLT_X));
         if (brw->gen >= 8) {
            OUT_RELOC64(mt->bo,
                        I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                        offset);
         } else {
            OUT_RELOC(mt->bo,
                      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                      offset);
         }
         OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
         ADVANCE_BATCH_TILED(dst_y_tiled, false);
      }
   }

   brw_emit_mi_flush(brw);
}