src/mesa/drivers/dri/i965/intel_blit.c
1 /*
2 * Copyright 2003 VMware, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26 #include "main/mtypes.h"
27 #include "main/blit.h"
28 #include "main/context.h"
29 #include "main/enums.h"
30 #include "main/fbobject.h"
31
32 #include "brw_context.h"
33 #include "brw_defines.h"
34 #include "intel_blit.h"
35 #include "intel_buffers.h"
36 #include "intel_fbo.h"
37 #include "intel_batchbuffer.h"
38 #include "intel_mipmap_tree.h"
39
40 #define FILE_DEBUG_FLAG DEBUG_BLIT
41
42 static void
43 intel_miptree_set_alpha_to_one(struct brw_context *brw,
44 struct intel_mipmap_tree *mt,
45 int x, int y, int width, int height);
46
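/* The return values below are the standard 8-bit BLT raster-operation (ROP)
 * codes: each value is the truth table of the destination result over the
 * (source, destination) inputs, where source contributes the 0xCC pattern
 * and destination the 0xAA pattern.  For example, GL_COPY is 0xCC
 * (dst = src), GL_NOOP is 0xAA (dst = dst) and GL_XOR is 0x66 (0xCC ^ 0xAA).
 */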
47 static GLuint translate_raster_op(GLenum logicop)
48 {
49 switch(logicop) {
50 case GL_CLEAR: return 0x00;
51 case GL_AND: return 0x88;
52 case GL_AND_REVERSE: return 0x44;
53 case GL_COPY: return 0xCC;
54 case GL_AND_INVERTED: return 0x22;
55 case GL_NOOP: return 0xAA;
56 case GL_XOR: return 0x66;
57 case GL_OR: return 0xEE;
58 case GL_NOR: return 0x11;
59 case GL_EQUIV: return 0x99;
60 case GL_INVERT: return 0x55;
61 case GL_OR_REVERSE: return 0xDD;
62 case GL_COPY_INVERTED: return 0x33;
63 case GL_OR_INVERTED: return 0xBB;
64 case GL_NAND: return 0x77;
65 case GL_SET: return 0xFF;
66 default: return 0;
67 }
68 }
69
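/* BR13 is the second DWORD of the XY blit commands; alongside the raster
 * operation and destination pitch it carries a color-depth field.  This
 * helper returns the color-depth bits for a given bytes-per-pixel value.
 */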
70 static uint32_t
71 br13_for_cpp(int cpp)
72 {
73 switch (cpp) {
74 case 16:
75 return BR13_32323232;
76 case 8:
77 return BR13_16161616;
78 case 4:
79 return BR13_8888;
80 case 2:
81 return BR13_565;
82 case 1:
83 return BR13_8;
84 default:
85 unreachable("not reached");
86 }
87 }
88
89 /**
90 * Emits the packet for switching the blitter from X to Y tiled or back.
91 *
92 * This has to be called in a single BEGIN_BATCH_BLT_TILED() /
93 * ADVANCE_BATCH_TILED(). This is because BCS_SWCTRL is saved and restored as
94 * part of the power context, not a render context, and if the batchbuffer was
95 * to get flushed between setting and blitting, or blitting and restoring, our
96 * tiling state would leak into other unsuspecting applications (like the X
97 * server).
98 */
99 static uint32_t *
100 set_blitter_tiling(struct brw_context *brw,
101 bool dst_y_tiled, bool src_y_tiled,
102 uint32_t *__map)
103 {
104 assert(brw->gen >= 6);
105
106 /* Idle the blitter before we update how tiling is interpreted. */
107 OUT_BATCH(MI_FLUSH_DW);
108 OUT_BATCH(0);
109 OUT_BATCH(0);
110 OUT_BATCH(0);
111
112 OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
113 OUT_BATCH(BCS_SWCTRL);
114 OUT_BATCH((BCS_SWCTRL_DST_Y | BCS_SWCTRL_SRC_Y) << 16 |
115 (dst_y_tiled ? BCS_SWCTRL_DST_Y : 0) |
116 (src_y_tiled ? BCS_SWCTRL_SRC_Y : 0));
117 return __map;
118 }
119 #define SET_BLITTER_TILING(...) __map = set_blitter_tiling(__VA_ARGS__, __map)
120
121 #define BEGIN_BATCH_BLT_TILED(n, dst_y_tiled, src_y_tiled) \
122 BEGIN_BATCH_BLT(n + ((dst_y_tiled || src_y_tiled) ? 14 : 0)); \
123 if (dst_y_tiled || src_y_tiled) \
124 SET_BLITTER_TILING(brw, dst_y_tiled, src_y_tiled)
125
126 #define ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled) \
127 if (dst_y_tiled || src_y_tiled) \
128 SET_BLITTER_TILING(brw, false, false); \
129 ADVANCE_BATCH()
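/* The extra 14 dwords accounted for above correspond to two
 * set_blitter_tiling() emissions (one to program BCS_SWCTRL, one to restore
 * it): each is a 4-dword MI_FLUSH_DW plus a 3-dword MI_LOAD_REGISTER_IMM,
 * i.e. 2 * (4 + 3) = 14.
 */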
130
131 static int
132 blt_pitch(struct intel_mipmap_tree *mt)
133 {
134 int pitch = mt->surf.row_pitch;
135 if (mt->surf.tiling != ISL_TILING_LINEAR)
136 pitch /= 4;
137 return pitch;
138 }
139
140 bool
141 intel_miptree_blit_compatible_formats(mesa_format src, mesa_format dst)
142 {
143 /* The BLT doesn't handle sRGB conversion */
144 assert(src == _mesa_get_srgb_format_linear(src));
145 assert(dst == _mesa_get_srgb_format_linear(dst));
146
147 /* No swizzle or format conversions possible, except... */
148 if (src == dst)
149 return true;
150
151 /* ...we can either discard the alpha channel when going from A->X,
152 * or we can fill the alpha channel with 0xff when going from X->A
153 */
154 if (src == MESA_FORMAT_B8G8R8A8_UNORM || src == MESA_FORMAT_B8G8R8X8_UNORM)
155 return (dst == MESA_FORMAT_B8G8R8A8_UNORM ||
156 dst == MESA_FORMAT_B8G8R8X8_UNORM);
157
158 if (src == MESA_FORMAT_R8G8B8A8_UNORM || src == MESA_FORMAT_R8G8B8X8_UNORM)
159 return (dst == MESA_FORMAT_R8G8B8A8_UNORM ||
160 dst == MESA_FORMAT_R8G8B8X8_UNORM);
161
162 return false;
163 }
164
165 static void
166 get_blit_intratile_offset_el(const struct brw_context *brw,
167 struct intel_mipmap_tree *mt,
168 uint32_t total_x_offset_el,
169 uint32_t total_y_offset_el,
170 uint32_t *base_address_offset,
171 uint32_t *x_offset_el,
172 uint32_t *y_offset_el)
173 {
174 isl_tiling_get_intratile_offset_el(mt->surf.tiling,
175 mt->cpp * 8, mt->surf.row_pitch,
176 total_x_offset_el, total_y_offset_el,
177 base_address_offset,
178 x_offset_el, y_offset_el);
179 if (mt->surf.tiling == ISL_TILING_LINEAR) {
180 /* From the Broadwell PRM docs for XY_SRC_COPY_BLT::SourceBaseAddress:
181 *
182 * "Base address of the destination surface: X=0, Y=0. Lower 32bits
183 * of the 48bit addressing. When Src Tiling is enabled (Bit_15
184 * enabled), this address must be 4KB-aligned. When Tiling is not
185 * enabled, this address should be CL (64byte) aligned."
186 *
187 * The offsets we get from ISL in the tiled case are already aligned.
188 * In the linear case, we need to do some of our own aligning.
189 */
190 assert(mt->surf.row_pitch % 64 == 0);
191 uint32_t delta = *base_address_offset & 63;
192 assert(delta % mt->cpp == 0);
193 *base_address_offset -= delta;
194 *x_offset_el += delta / mt->cpp;
195 } else {
196 assert(*base_address_offset % 4096 == 0);
197 }
198 }
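
/* A hypothetical example of the linear fixup above: if ISL hands back
 * base_address_offset = 4196 for a cpp = 4 surface, delta = 4196 & 63 = 36,
 * so the base address drops to 4160 (64-byte aligned) and x_offset_el grows
 * by 36 / 4 = 9 pixels.
 */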
199
200 static bool
201 emit_miptree_blit(struct brw_context *brw,
202 struct intel_mipmap_tree *src_mt,
203 uint32_t src_x, uint32_t src_y,
204 struct intel_mipmap_tree *dst_mt,
205 uint32_t dst_x, uint32_t dst_y,
206 uint32_t width, uint32_t height,
207 bool reverse, GLenum logicop)
208 {
209 /* According to the Ivy Bridge PRM, Vol1 Part4, section 1.2.1.2 (Graphics
210 * Data Size Limitations):
211 *
212 * The BLT engine is capable of transferring very large quantities of
213 * graphics data. Any graphics data read from and written to the
214 * destination is permitted to represent a number of pixels that
215 * occupies up to 65,536 scan lines and up to 32,768 bytes per scan line
216 * at the destination. The maximum number of pixels that may be
217 * represented per scan line’s worth of graphics data depends on the
218 * color depth.
219 *
220 * The blitter's pitch is a signed 16-bit integer, but measured in bytes
221 * for linear surfaces and DWords for tiled surfaces. So the maximum
222 * pitch is 32k linear and 128k tiled.
223 */
224 if (blt_pitch(src_mt) >= 32768 || blt_pitch(dst_mt) >= 32768) {
225 perf_debug("Falling back due to >= 32k/128k pitch\n");
226 return false;
227 }
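
/* Note that blt_pitch() already divides tiled pitches by 4 (they are
 * programmed in dwords), so the single >= 32768 comparison above covers both
 * the 32k linear and the 128k tiled limit.
 */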
228
229 /* We need to split the blit into chunks that each fit within the blitter's
230 * restrictions. We can't use a chunk size of 32768 because we need to
231 * ensure that src_tile_x + chunk_size fits. We choose 16384 because it's
232 * a nice round power of two, big enough that performance won't suffer, and
233 * small enough to guarantee everything fits.
234 */
235 const uint32_t max_chunk_size = 16384;
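
/* For example, a hypothetical 40000-pixel-wide blit is emitted as chunks at
 * chunk_x = 0, 16384 and 32768, with widths 16384, 16384 and 7232.
 */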
236
237 for (uint32_t chunk_x = 0; chunk_x < width; chunk_x += max_chunk_size) {
238 for (uint32_t chunk_y = 0; chunk_y < height; chunk_y += max_chunk_size) {
239 const uint32_t chunk_w = MIN2(max_chunk_size, width - chunk_x);
240 const uint32_t chunk_h = MIN2(max_chunk_size, height - chunk_y);
241
242 uint32_t src_offset, src_tile_x, src_tile_y;
243 get_blit_intratile_offset_el(brw, src_mt,
244 src_x + chunk_x, src_y + chunk_y,
245 &src_offset, &src_tile_x, &src_tile_y);
246
247 uint32_t dst_offset, dst_tile_x, dst_tile_y;
248 get_blit_intratile_offset_el(brw, dst_mt,
249 dst_x + chunk_x, dst_y + chunk_y,
250 &dst_offset, &dst_tile_x, &dst_tile_y);
251
252 if (!intelEmitCopyBlit(brw,
253 src_mt->cpp,
254 reverse ? -src_mt->surf.row_pitch :
255 src_mt->surf.row_pitch,
256 src_mt->bo, src_mt->offset + src_offset,
257 src_mt->surf.tiling,
258 dst_mt->surf.row_pitch,
259 dst_mt->bo, dst_mt->offset + dst_offset,
260 dst_mt->surf.tiling,
261 src_tile_x, src_tile_y,
262 dst_tile_x, dst_tile_y,
263 chunk_w, chunk_h,
264 logicop)) {
265 /* If this is ever going to fail, it will fail on the first chunk */
266 assert(chunk_x == 0 && chunk_y == 0);
267 return false;
268 }
269 }
270 }
271
272 return true;
273 }
274
275 /**
276 * Implements a rectangular block transfer (blit) of pixels between two
277 * miptrees.
278 *
279 * Our blitter can operate on 1, 2, or 4-byte-per-pixel data, with generous,
280 * but limited, pitches and sizes allowed.
281 *
282 * The src/dst coordinates are relative to the given level/slice of the
283 * miptree.
284 *
285 * If @src_flip or @dst_flip is set, then the rectangle within that miptree
286 * will be inverted (including scanline order) when copying. This is common
287 * in GL when copying between window system and user-created
288 * renderbuffers/textures.
289 */
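/* A hypothetical call, copying a 256x128 region from level 0 / slice 0 of
 * src_mt to dst_mt with no flips:
 *
 *    if (!intel_miptree_blit(brw, src_mt, 0, 0, 0, 0, false,
 *                            dst_mt, 0, 0, 0, 0, false,
 *                            256, 128, GL_COPY)) {
 *       ... fall back to another copy path ...
 *    }
 */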
290 bool
291 intel_miptree_blit(struct brw_context *brw,
292 struct intel_mipmap_tree *src_mt,
293 int src_level, int src_slice,
294 uint32_t src_x, uint32_t src_y, bool src_flip,
295 struct intel_mipmap_tree *dst_mt,
296 int dst_level, int dst_slice,
297 uint32_t dst_x, uint32_t dst_y, bool dst_flip,
298 uint32_t width, uint32_t height,
299 GLenum logicop)
300 {
301 /* The blitter doesn't understand multisampling at all. */
302 if (src_mt->surf.samples > 1 || dst_mt->surf.samples > 1)
303 return false;
304
305 /* No sRGB decode or encode is done by the hardware blitter, which is
306 * consistent with what we want in many callers (glCopyTexSubImage(),
307 * texture validation, etc.).
308 */
309 mesa_format src_format = _mesa_get_srgb_format_linear(src_mt->format);
310 mesa_format dst_format = _mesa_get_srgb_format_linear(dst_mt->format);
311
312 /* The hardware blitter doesn't support format conversions. We do, however,
313 * support blitting ARGB8888 to XRGB8888 (trivial: the values dropped into
314 * the X channel don't matter) and XRGB8888 to ARGB8888 (by setting the A
315 * channel to 1.0 at the end).
316 */
317 if (!intel_miptree_blit_compatible_formats(src_format, dst_format)) {
318 perf_debug("%s: Can't use hardware blitter from %s to %s, "
319 "falling back.\n", __func__,
320 _mesa_get_format_name(src_format),
321 _mesa_get_format_name(dst_format));
322 return false;
323 }
324
325 /* The blitter has no idea about HiZ or fast color clears, so we need to
326 * resolve the miptrees before we do anything.
327 */
328 intel_miptree_access_raw(brw, src_mt, src_level, src_slice, false);
329 intel_miptree_access_raw(brw, dst_mt, dst_level, dst_slice, true);
330
331 if (src_flip) {
332 const unsigned h0 = src_mt->surf.phys_level0_sa.height;
333 src_y = minify(h0, src_level - src_mt->first_level) - src_y - height;
334 }
335
336 if (dst_flip) {
337 const unsigned h0 = dst_mt->surf.phys_level0_sa.height;
338 dst_y = minify(h0, dst_level - dst_mt->first_level) - dst_y - height;
339 }
340
341 uint32_t src_image_x, src_image_y, dst_image_x, dst_image_y;
342 intel_miptree_get_image_offset(src_mt, src_level, src_slice,
343 &src_image_x, &src_image_y);
344 intel_miptree_get_image_offset(dst_mt, dst_level, dst_slice,
345 &dst_image_x, &dst_image_y);
346 src_x += src_image_x;
347 src_y += src_image_y;
348 dst_x += dst_image_x;
349 dst_y += dst_image_y;
350
351 if (!emit_miptree_blit(brw, src_mt, src_x, src_y,
352 dst_mt, dst_x, dst_y, width, height,
353 src_flip != dst_flip, logicop)) {
354 return false;
355 }
356
357 /* XXX This could be done in a single pass using XY_FULL_MONO_PATTERN_BLT */
358 if (_mesa_get_format_bits(src_format, GL_ALPHA_BITS) == 0 &&
359 _mesa_get_format_bits(dst_format, GL_ALPHA_BITS) > 0) {
360 intel_miptree_set_alpha_to_one(brw, dst_mt,
361 dst_x, dst_y,
362 width, height);
363 }
364
365 return true;
366 }
367
368 bool
369 intel_miptree_copy(struct brw_context *brw,
370 struct intel_mipmap_tree *src_mt,
371 int src_level, int src_slice,
372 uint32_t src_x, uint32_t src_y,
373 struct intel_mipmap_tree *dst_mt,
374 int dst_level, int dst_slice,
375 uint32_t dst_x, uint32_t dst_y,
376 uint32_t src_width, uint32_t src_height)
377 {
378 /* The blitter doesn't understand multisampling at all. */
379 if (src_mt->surf.samples > 1 || dst_mt->surf.samples > 1)
380 return false;
381
382 if (src_mt->format == MESA_FORMAT_S_UINT8)
383 return false;
384
385 /* The blitter has no idea about HiZ or fast color clears, so we need to
386 * resolve the miptrees before we do anything.
387 */
388 intel_miptree_access_raw(brw, src_mt, src_level, src_slice, false);
389 intel_miptree_access_raw(brw, dst_mt, dst_level, dst_slice, true);
390
391 uint32_t src_image_x, src_image_y;
392 intel_miptree_get_image_offset(src_mt, src_level, src_slice,
393 &src_image_x, &src_image_y);
394
395 if (_mesa_is_format_compressed(src_mt->format)) {
396 GLuint bw, bh;
397 _mesa_get_format_block_size(src_mt->format, &bw, &bh);
398
399 /* Compressed textures need not have dimensions that are a multiple of
400 * the block size. Rectangles in compressed textures do need to be a
401 * multiple of the block size. The one exception is that the right and
402 * bottom edges may be at the right or bottom edge of the miplevel even
403 * if it's not aligned.
404 */
405 assert(src_x % bw == 0);
406 assert(src_y % bh == 0);
407
408 assert(src_width % bw == 0 ||
409 src_x + src_width ==
410 minify(src_mt->surf.logical_level0_px.width, src_level));
411 assert(src_height % bh == 0 ||
412 src_y + src_height ==
413 minify(src_mt->surf.logical_level0_px.height, src_level));
414
415 src_x /= (int)bw;
416 src_y /= (int)bh;
417 src_width = DIV_ROUND_UP(src_width, (int)bw);
418 src_height = DIV_ROUND_UP(src_height, (int)bh);
419 }
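
/* E.g. with the conversion above, for a hypothetical 4x4-block format (such
 * as DXT1) and a full 70x34 miplevel copy: src_width becomes
 * DIV_ROUND_UP(70, 4) = 18 and src_height DIV_ROUND_UP(34, 4) = 9 blocks;
 * the asserts pass because the right/bottom edges coincide with the
 * miplevel edges.
 */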
420 src_x += src_image_x;
421 src_y += src_image_y;
422
423 uint32_t dst_image_x, dst_image_y;
424 intel_miptree_get_image_offset(dst_mt, dst_level, dst_slice,
425 &dst_image_x, &dst_image_y);
426
427 if (_mesa_is_format_compressed(dst_mt->format)) {
428 GLuint bw, bh;
429 _mesa_get_format_block_size(dst_mt->format, &bw, &bh);
430
431 assert(dst_x % bw == 0);
432 assert(dst_y % bh == 0);
433
434 dst_x /= (int)bw;
435 dst_y /= (int)bh;
436 }
437 dst_x += dst_image_x;
438 dst_y += dst_image_y;
439
440 return emit_miptree_blit(brw, src_mt, src_x, src_y,
441 dst_mt, dst_x, dst_y,
442 src_width, src_height, false, GL_COPY);
443 }
444
445 static bool
446 alignment_valid(struct brw_context *brw, unsigned offset,
447 enum isl_tiling tiling)
448 {
449 /* Tiled buffers must be page-aligned (4K). */
450 if (tiling != ISL_TILING_LINEAR)
451 return (offset & 4095) == 0;
452
453 /* On Gen8+, linear buffers must be cacheline-aligned. */
454 if (brw->gen >= 8)
455 return (offset & 63) == 0;
456
457 return true;
458 }
459
460 static uint32_t
461 xy_blit_cmd(enum isl_tiling src_tiling, enum isl_tiling dst_tiling,
462 uint32_t cpp)
463 {
464 uint32_t CMD = 0;
465
466 assert(cpp <= 4);
467 switch (cpp) {
468 case 1:
469 case 2:
470 CMD = XY_SRC_COPY_BLT_CMD;
471 break;
472 case 4:
473 CMD = XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
474 break;
475 default:
476 unreachable("not reached");
477 }
478
479 if (dst_tiling != ISL_TILING_LINEAR)
480 CMD |= XY_DST_TILED;
481
482 if (src_tiling != ISL_TILING_LINEAR)
483 CMD |= XY_SRC_TILED;
484
485 return CMD;
486 }
487
488 /* Copy BitBlt
489 */
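/* Pitches are given in bytes here (they are converted to dwords below for
 * tiled surfaces), and src_pitch may be negative to perform a vertically
 * flipped copy.  Returns false when the blitter cannot handle the request,
 * in which case the caller must fall back to another copy path.
 */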
490 bool
491 intelEmitCopyBlit(struct brw_context *brw,
492 GLuint cpp,
493 int32_t src_pitch,
494 struct brw_bo *src_buffer,
495 GLuint src_offset,
496 enum isl_tiling src_tiling,
497 int32_t dst_pitch,
498 struct brw_bo *dst_buffer,
499 GLuint dst_offset,
500 enum isl_tiling dst_tiling,
501 GLshort src_x, GLshort src_y,
502 GLshort dst_x, GLshort dst_y,
503 GLshort w, GLshort h,
504 GLenum logic_op)
505 {
506 GLuint CMD, BR13;
507 int dst_y2 = dst_y + h;
508 int dst_x2 = dst_x + w;
509 bool dst_y_tiled = dst_tiling == ISL_TILING_Y0;
510 bool src_y_tiled = src_tiling == ISL_TILING_Y0;
511 uint32_t src_tile_w, src_tile_h;
512 uint32_t dst_tile_w, dst_tile_h;
513
514 if ((dst_y_tiled || src_y_tiled) && brw->gen < 6)
515 return false;
516
517 const unsigned bo_sizes = dst_buffer->size + src_buffer->size;
518
519 /* do space check before going any further */
520 if (!brw_batch_has_aperture_space(brw, bo_sizes))
521 intel_batchbuffer_flush(brw);
522
523 if (!brw_batch_has_aperture_space(brw, bo_sizes))
524 return false;
525
526 unsigned length = brw->gen >= 8 ? 10 : 8;
527
528 intel_batchbuffer_require_space(brw, length * 4, BLT_RING);
529 DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
530 __func__,
531 src_buffer, src_pitch, src_offset, src_x, src_y,
532 dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);
533
534 intel_get_tile_dims(src_tiling, cpp, &src_tile_w, &src_tile_h);
535 intel_get_tile_dims(dst_tiling, cpp, &dst_tile_w, &dst_tile_h);
536
537 /* For Tiled surfaces, the pitch has to be a multiple of the Tile width
538 * (X direction width of the Tile). This is ensured while allocating the
539 * buffer object.
540 */
541 assert(src_tiling == ISL_TILING_LINEAR || (src_pitch % src_tile_w) == 0);
542 assert(dst_tiling == ISL_TILING_LINEAR || (dst_pitch % dst_tile_w) == 0);
543
544 /* For big formats (such as floating point), do the copy using 16 or
545 * 32bpp and multiply the coordinates.
546 */
547 if (cpp > 4) {
548 if (cpp % 4 == 2) {
549 dst_x *= cpp / 2;
550 dst_x2 *= cpp / 2;
551 src_x *= cpp / 2;
552 cpp = 2;
553 } else {
554 assert(cpp % 4 == 0);
555 dst_x *= cpp / 4;
556 dst_x2 *= cpp / 4;
557 src_x *= cpp / 4;
558 cpp = 4;
559 }
560 }
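
/* E.g. a hypothetical 100-pixel-wide blit of a 16-byte format (RGBA32F) is
 * emitted by the code above as a 400-pixel-wide 32bpp blit: the x
 * coordinates are multiplied by 16 / 4 = 4 and cpp drops to 4.
 */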
561
562 if (!alignment_valid(brw, dst_offset, dst_tiling))
563 return false;
564 if (!alignment_valid(brw, src_offset, src_tiling))
565 return false;
566
567 /* Blit pitch must be dword-aligned. Otherwise, the hardware appears to drop
568 * the low bits. Offsets must be naturally aligned.
569 */
570 if (src_pitch % 4 != 0 || src_offset % cpp != 0 ||
571 dst_pitch % 4 != 0 || dst_offset % cpp != 0)
572 return false;
573
574 assert(cpp <= 4);
575 BR13 = br13_for_cpp(cpp) | translate_raster_op(logic_op) << 16;
576
577 CMD = xy_blit_cmd(src_tiling, dst_tiling, cpp);
578
579 /* For tiled source and destination, pitch value should be specified
580 * as a number of Dwords.
581 */
582 if (dst_tiling != ISL_TILING_LINEAR)
583 dst_pitch /= 4;
584
585 if (src_tiling != ISL_TILING_LINEAR)
586 src_pitch /= 4;
587
588 if (dst_y2 <= dst_y || dst_x2 <= dst_x)
589 return true;
590
591 assert(dst_x < dst_x2);
592 assert(dst_y < dst_y2);
593
594 BEGIN_BATCH_BLT_TILED(length, dst_y_tiled, src_y_tiled);
595 OUT_BATCH(CMD | (length - 2));
596 OUT_BATCH(BR13 | (uint16_t)dst_pitch);
597 OUT_BATCH(SET_FIELD(dst_y, BLT_Y) | SET_FIELD(dst_x, BLT_X));
598 OUT_BATCH(SET_FIELD(dst_y2, BLT_Y) | SET_FIELD(dst_x2, BLT_X));
599 if (brw->gen >= 8) {
600 OUT_RELOC64(dst_buffer,
601 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
602 dst_offset);
603 } else {
604 OUT_RELOC(dst_buffer,
605 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
606 dst_offset);
607 }
608 OUT_BATCH(SET_FIELD(src_y, BLT_Y) | SET_FIELD(src_x, BLT_X));
609 OUT_BATCH((uint16_t)src_pitch);
610 if (brw->gen >= 8) {
611 OUT_RELOC64(src_buffer,
612 I915_GEM_DOMAIN_RENDER, 0,
613 src_offset);
614 } else {
615 OUT_RELOC(src_buffer,
616 I915_GEM_DOMAIN_RENDER, 0,
617 src_offset);
618 }
619
620 ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled);
621
622 brw_emit_mi_flush(brw);
623
624 return true;
625 }
626
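/* Immediate color-expand blit: expands a 1-bit-per-pixel source bitmap
 * (src_bits, streamed directly into the batch) into the destination,
 * writing fg_color wherever a bit is set.  This appears to back bitmap-style
 * paths such as glBitmap.  Emits an XY_SETUP_BLT followed by an
 * XY_TEXT_IMMEDIATE_BLT whose payload is the bitmap data itself.
 */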
627 bool
628 intelEmitImmediateColorExpandBlit(struct brw_context *brw,
629 GLuint cpp,
630 GLubyte *src_bits, GLuint src_size,
631 GLuint fg_color,
632 GLshort dst_pitch,
633 struct brw_bo *dst_buffer,
634 GLuint dst_offset,
635 enum isl_tiling dst_tiling,
636 GLshort x, GLshort y,
637 GLshort w, GLshort h,
638 GLenum logic_op)
639 {
640 int dwords = ALIGN(src_size, 8) / 4;
641 uint32_t opcode, br13, blit_cmd;
642
643 if (dst_tiling != ISL_TILING_LINEAR) {
644 if (dst_offset & 4095)
645 return false;
646 if (dst_tiling == ISL_TILING_Y0)
647 return false;
648 }
649
650 assert((logic_op >= GL_CLEAR) && (logic_op <= (GL_CLEAR + 0x0f)));
651 assert(dst_pitch > 0);
652
653 if (w < 0 || h < 0)
654 return true;
655
656 DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d, %d bytes %d dwords\n",
657 __func__,
658 dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);
659
660 unsigned xy_setup_blt_length = brw->gen >= 8 ? 10 : 8;
661 intel_batchbuffer_require_space(brw, (xy_setup_blt_length * 4) +
662 (3 * 4) + dwords * 4, BLT_RING);
663
664 opcode = XY_SETUP_BLT_CMD;
665 if (cpp == 4)
666 opcode |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
667 if (dst_tiling != ISL_TILING_LINEAR) {
668 opcode |= XY_DST_TILED;
669 dst_pitch /= 4;
670 }
671
672 br13 = dst_pitch | (translate_raster_op(logic_op) << 16) | (1 << 29);
673 br13 |= br13_for_cpp(cpp);
674
675 blit_cmd = XY_TEXT_IMMEDIATE_BLIT_CMD | XY_TEXT_BYTE_PACKED; /* packing? */
676 if (dst_tiling != ISL_TILING_LINEAR)
677 blit_cmd |= XY_DST_TILED;
678
679 BEGIN_BATCH_BLT(xy_setup_blt_length + 3);
680 OUT_BATCH(opcode | (xy_setup_blt_length - 2));
681 OUT_BATCH(br13);
682 OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
683 OUT_BATCH((100 << 16) | 100); /* clip x2, y2 */
684 if (brw->gen >= 8) {
685 OUT_RELOC64(dst_buffer,
686 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
687 dst_offset);
688 } else {
689 OUT_RELOC(dst_buffer,
690 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
691 dst_offset);
692 }
693 OUT_BATCH(0); /* bg */
694 OUT_BATCH(fg_color); /* fg */
695 OUT_BATCH(0); /* pattern base addr */
696 if (brw->gen >= 8)
697 OUT_BATCH(0);
698
699 OUT_BATCH(blit_cmd | ((3 - 2) + dwords));
700 OUT_BATCH(SET_FIELD(y, BLT_Y) | SET_FIELD(x, BLT_X));
701 OUT_BATCH(SET_FIELD(y + h, BLT_Y) | SET_FIELD(x + w, BLT_X));
702 ADVANCE_BATCH();
703
704 intel_batchbuffer_data(brw, src_bits, dwords * 4, BLT_RING);
705
706 brw_emit_mi_flush(brw);
707
708 return true;
709 }
710
711 /* We don't have a memmove-type blit like some other hardware, so we'll do a
712 * rectangular blit covering a large space, then emit 1-scanline blit at the
713 * end to cover the last if we need.
714 */
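/* A hypothetical worked example, assuming 64-byte-aligned offsets: for
 * size = 100000, the first pass uses pitch = 32704 ((1 << 15) - 64) and
 * height = 3, copying 98112 bytes; the second pass copies the remaining
 * 1888 bytes as a single 1888x1 blit.
 */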
715 void
716 intel_emit_linear_blit(struct brw_context *brw,
717 struct brw_bo *dst_bo,
718 unsigned int dst_offset,
719 struct brw_bo *src_bo,
720 unsigned int src_offset,
721 unsigned int size)
722 {
723 struct gl_context *ctx = &brw->ctx;
724 GLuint pitch, height;
725 int16_t src_x, dst_x;
726 bool ok;
727
728 do {
729 /* The pitch given to the GPU must be DWORD aligned, and we want the width
730 * to match the pitch. The max width is (1 << 15) - 1; we use (1 << 15) - 64
731 * so the sub-cacheline x offsets (< 64) below still fit, rounded down to a
732 * DWORD multiple. */
733 pitch = ROUND_DOWN_TO(MIN2(size, (1 << 15) - 64), 4);
734 height = (size < pitch || pitch == 0) ? 1 : size / pitch;
735
736 src_x = src_offset % 64;
737 dst_x = dst_offset % 64;
738 pitch = ALIGN(MIN2(size, (1 << 15) - 64), 4);
739 assert(src_x + pitch < 1 << 15);
740 assert(dst_x + pitch < 1 << 15);
741
742 ok = intelEmitCopyBlit(brw, 1,
743 pitch, src_bo, src_offset - src_x,
744 ISL_TILING_LINEAR,
745 pitch, dst_bo, dst_offset - dst_x,
746 ISL_TILING_LINEAR,
747 src_x, 0, /* src x/y */
748 dst_x, 0, /* dst x/y */
749 MIN2(size, pitch), height, /* w, h */
750 GL_COPY);
751 if (!ok) {
752 _mesa_problem(ctx, "Failed to linear blit %dx%d\n",
753 MIN2(size, pitch), height);
754 return;
755 }
756
757 pitch *= height;
758 if (size <= pitch)
759 return;
760
761 src_offset += pitch;
762 dst_offset += pitch;
763 size -= pitch;
764 } while (1);
765 }
766
767 /**
768 * Used to initialize the alpha value of an ARGB8888 miptree after copying
769 * into it from an XRGB8888 source.
770 *
771 * This is very common with glCopyTexImage2D(). Note that the coordinates are
772 * relative to the start of the miptree, not relative to a slice within the
773 * miptree.
774 */
775 static void
776 intel_miptree_set_alpha_to_one(struct brw_context *brw,
777 struct intel_mipmap_tree *mt,
778 int x, int y, int width, int height)
779 {
780 uint32_t BR13, CMD;
781 int pitch, cpp;
782
783 pitch = mt->surf.row_pitch;
784 cpp = mt->cpp;
785
786 DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n",
787 __func__, mt->bo, pitch, x, y, width, height);
788
789 BR13 = br13_for_cpp(cpp) | 0xf0 << 16;
790 CMD = XY_COLOR_BLT_CMD;
791 CMD |= XY_BLT_WRITE_ALPHA;
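
/* 0xf0 << 16 above appears to select the PATCOPY raster operation
 * (destination = pattern), so the solid color programmed at the end of the
 * packet (0xffffffff) is written directly; with only XY_BLT_WRITE_ALPHA set,
 * just the alpha channel of the destination is modified.
 */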
792
793 if (mt->surf.tiling != ISL_TILING_LINEAR) {
794 CMD |= XY_DST_TILED;
795 pitch /= 4;
796 }
797 BR13 |= pitch;
798
799 /* do space check before going any further */
800 if (!brw_batch_has_aperture_space(brw, mt->bo->size))
801 intel_batchbuffer_flush(brw);
802
803 unsigned length = brw->gen >= 8 ? 7 : 6;
804 const bool dst_y_tiled = mt->surf.tiling == ISL_TILING_Y0;
805
806 /* We need to split the blit into chunks that each fit within the blitter's
807 * restrictions. We can't use a chunk size of 32768 because we need to
808 * ensure that src_tile_x + chunk_size fits. We choose 16384 because it's
809 * a nice round power of two, big enough that performance won't suffer, and
810 * small enough to guarantee everything fits.
811 */
812 const uint32_t max_chunk_size = 16384;
813
814 for (uint32_t chunk_x = 0; chunk_x < width; chunk_x += max_chunk_size) {
815 for (uint32_t chunk_y = 0; chunk_y < height; chunk_y += max_chunk_size) {
816 const uint32_t chunk_w = MIN2(max_chunk_size, width - chunk_x);
817 const uint32_t chunk_h = MIN2(max_chunk_size, height - chunk_y);
818
819 uint32_t offset, tile_x, tile_y;
820 get_blit_intratile_offset_el(brw, mt,
821 x + chunk_x, y + chunk_y,
822 &offset, &tile_x, &tile_y);
823
824 BEGIN_BATCH_BLT_TILED(length, dst_y_tiled, false);
825 OUT_BATCH(CMD | (length - 2));
826 OUT_BATCH(BR13);
827 OUT_BATCH(SET_FIELD(y + chunk_y, BLT_Y) |
828 SET_FIELD(x + chunk_x, BLT_X));
829 OUT_BATCH(SET_FIELD(y + chunk_y + chunk_h, BLT_Y) |
830 SET_FIELD(x + chunk_x + chunk_w, BLT_X));
831 if (brw->gen >= 8) {
832 OUT_RELOC64(mt->bo,
833 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
834 offset);
835 } else {
836 OUT_RELOC(mt->bo,
837 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
838 offset);
839 }
840 OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
841 ADVANCE_BATCH_TILED(dst_y_tiled, false);
842 }
843 }
844
845 brw_emit_mi_flush(brw);
846 }