i965/miptree: Use > 1 instead of > 0 to check for multisampling
[mesa.git] / src / mesa / drivers / dri / i965 / intel_blit.c
/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/mtypes.h"
#include "main/blit.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/fbobject.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "intel_blit.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"

#define FILE_DEBUG_FLAG DEBUG_BLIT

static void
intel_miptree_set_alpha_to_one(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               int x, int y, int width, int height);

static GLuint translate_raster_op(GLenum logicop)
{
   switch (logicop) {
   case GL_CLEAR: return 0x00;
   case GL_AND: return 0x88;
   case GL_AND_REVERSE: return 0x44;
   case GL_COPY: return 0xCC;
   case GL_AND_INVERTED: return 0x22;
   case GL_NOOP: return 0xAA;
   case GL_XOR: return 0x66;
   case GL_OR: return 0xEE;
   case GL_NOR: return 0x11;
   case GL_EQUIV: return 0x99;
   case GL_INVERT: return 0x55;
   case GL_OR_REVERSE: return 0xDD;
   case GL_COPY_INVERTED: return 0x33;
   case GL_OR_INVERTED: return 0xBB;
   case GL_NAND: return 0x77;
   case GL_SET: return 0xFF;
   default: return 0;
   }
}
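
/* Worked example (illustrative note, not part of the original file): the
 * values above follow the usual ROP3 convention in which source is 0xCC and
 * destination is 0xAA, for instance:
 *
 *    GL_AND  -> 0xCC & 0xAA    == 0x88
 *    GL_XOR  -> 0xCC ^ 0xAA    == 0x66
 *    GL_NOR  -> ~(0xCC | 0xAA) == 0x11
 *
 * intelEmitCopyBlit() below shifts the returned byte into bits 23:16 of BR13.
 */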

static uint32_t
br13_for_cpp(int cpp)
{
   switch (cpp) {
   case 16:
      return BR13_32323232;
   case 8:
      return BR13_16161616;
   case 4:
      return BR13_8888;
   case 2:
      return BR13_565;
   case 1:
      return BR13_8;
   default:
      unreachable("not reached");
   }
}
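
/* Example (illustrative only): a BGRA8888 surface has cpp == 4 and selects
 * the 32bpp BR13_8888 color-depth encoding, while a 16bpp surface (cpp == 2)
 * selects BR13_565.  The returned bits land in the BR13 dword of the blit
 * command.
 */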

/**
 * Emits the packet for switching the blitter from X to Y tiled or back.
 *
 * This has to be called in a single BEGIN_BATCH_BLT_TILED() /
 * ADVANCE_BATCH_TILED().  This is because BCS_SWCTRL is saved and restored as
 * part of the power context, not a render context, and if the batchbuffer was
 * to get flushed between setting and blitting, or blitting and restoring, our
 * tiling state would leak into other unsuspecting applications (like the X
 * server).
 */
static uint32_t *
set_blitter_tiling(struct brw_context *brw,
                   bool dst_y_tiled, bool src_y_tiled,
                   uint32_t *__map)
{
   assert(brw->gen >= 6);

   /* Idle the blitter before we update how tiling is interpreted. */
   OUT_BATCH(MI_FLUSH_DW);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);

   OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
   OUT_BATCH(BCS_SWCTRL);
   OUT_BATCH((BCS_SWCTRL_DST_Y | BCS_SWCTRL_SRC_Y) << 16 |
             (dst_y_tiled ? BCS_SWCTRL_DST_Y : 0) |
             (src_y_tiled ? BCS_SWCTRL_SRC_Y : 0));
   return __map;
}
#define SET_BLITTER_TILING(...) __map = set_blitter_tiling(__VA_ARGS__, __map)

#define BEGIN_BATCH_BLT_TILED(n, dst_y_tiled, src_y_tiled)              \
      BEGIN_BATCH_BLT(n + ((dst_y_tiled || src_y_tiled) ? 14 : 0));     \
      if (dst_y_tiled || src_y_tiled)                                   \
         SET_BLITTER_TILING(brw, dst_y_tiled, src_y_tiled)

#define ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled)                   \
      if (dst_y_tiled || src_y_tiled)                                   \
         SET_BLITTER_TILING(brw, false, false);                         \
      ADVANCE_BATCH()
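
/* Usage sketch (illustrative only; intelEmitCopyBlit() below is the real
 * user): the pair must bracket exactly one blit so that BCS_SWCTRL is
 * programmed and restored within a single batch:
 *
 *    BEGIN_BATCH_BLT_TILED(length, dst_y_tiled, src_y_tiled);
 *    OUT_BATCH(CMD | (length - 2));
 *    ...remaining blit dwords...
 *    ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled);
 */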

static int
blt_pitch(struct intel_mipmap_tree *mt)
{
   int pitch = mt->pitch;
   if (mt->tiling)
      pitch /= 4;
   return pitch;
}
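
/* Example (illustrative only): an X- or Y-tiled miptree with
 * mt->pitch == 8192 bytes reports a blitter pitch of 2048, because tiled
 * pitches are programmed in DWords; a linear miptree reports its byte pitch
 * unchanged.
 */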

bool
intel_miptree_blit_compatible_formats(mesa_format src, mesa_format dst)
{
   /* The BLT doesn't handle sRGB conversion */
   assert(src == _mesa_get_srgb_format_linear(src));
   assert(dst == _mesa_get_srgb_format_linear(dst));

   /* No swizzle or format conversions possible, except... */
   if (src == dst)
      return true;

   /* ...we can either discard the alpha channel when going from A->X,
    * or we can fill the alpha channel with 0xff when going from X->A
    */
   if (src == MESA_FORMAT_B8G8R8A8_UNORM || src == MESA_FORMAT_B8G8R8X8_UNORM)
      return (dst == MESA_FORMAT_B8G8R8A8_UNORM ||
              dst == MESA_FORMAT_B8G8R8X8_UNORM);

   if (src == MESA_FORMAT_R8G8B8A8_UNORM || src == MESA_FORMAT_R8G8B8X8_UNORM)
      return (dst == MESA_FORMAT_R8G8B8A8_UNORM ||
              dst == MESA_FORMAT_R8G8B8X8_UNORM);

   return false;
}
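
/* Example (illustrative only): B8G8R8X8 -> B8G8R8A8 is accepted here; the
 * copy itself leaves garbage in the destination's alpha byte, and
 * intel_miptree_blit() fixes that up afterwards with
 * intel_miptree_set_alpha_to_one().  B8G8R8A8 -> R8G8B8A8 is rejected
 * because the blitter cannot swizzle channels.
 */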

static void
get_blit_intratile_offset_el(const struct brw_context *brw,
                             struct intel_mipmap_tree *mt,
                             uint32_t total_x_offset_el,
                             uint32_t total_y_offset_el,
                             uint32_t *base_address_offset,
                             uint32_t *x_offset_el,
                             uint32_t *y_offset_el)
{
   enum isl_tiling tiling = intel_miptree_get_isl_tiling(mt);
   isl_tiling_get_intratile_offset_el(tiling, mt->cpp * 8, mt->pitch,
                                      total_x_offset_el, total_y_offset_el,
                                      base_address_offset,
                                      x_offset_el, y_offset_el);
   if (tiling == ISL_TILING_LINEAR) {
      /* From the Broadwell PRM docs for XY_SRC_COPY_BLT::SourceBaseAddress:
       *
       *    "Base address of the destination surface: X=0, Y=0. Lower 32bits
       *    of the 48bit addressing. When Src Tiling is enabled (Bit_15
       *    enabled), this address must be 4KB-aligned. When Tiling is not
       *    enabled, this address should be CL (64byte) aligned."
       *
       * The offsets we get from ISL in the tiled case are already aligned.
       * In the linear case, we need to do some of our own aligning.
       */
      assert(mt->pitch % 64 == 0);
      uint32_t delta = *base_address_offset & 63;
      assert(delta % mt->cpp == 0);
      *base_address_offset -= delta;
      *x_offset_el += delta / mt->cpp;
   } else {
      assert(*base_address_offset % 4096 == 0);
   }
}
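
/* Worked example (illustrative numbers): for a linear surface with cpp == 4,
 * an offset that lands at byte 0x1030 has delta == 0x30, so the base address
 * is pulled back to the cacheline boundary at 0x1000 and *x_offset_el grows
 * by 0x30 / 4 == 12 pixels -- the same texels, addressed from the 64-byte
 * aligned base the blitter requires.
 */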

static bool
emit_miptree_blit(struct brw_context *brw,
                  struct intel_mipmap_tree *src_mt,
                  uint32_t src_x, uint32_t src_y,
                  struct intel_mipmap_tree *dst_mt,
                  uint32_t dst_x, uint32_t dst_y,
                  uint32_t width, uint32_t height,
                  bool reverse, GLenum logicop)
{
   /* According to the Ivy Bridge PRM, Vol1 Part4, section 1.2.1.2 (Graphics
    * Data Size Limitations):
    *
    *    The BLT engine is capable of transferring very large quantities of
    *    graphics data. Any graphics data read from and written to the
    *    destination is permitted to represent a number of pixels that
    *    occupies up to 65,536 scan lines and up to 32,768 bytes per scan line
    *    at the destination. The maximum number of pixels that may be
    *    represented per scan line's worth of graphics data depends on the
    *    color depth.
    *
    * The blitter's pitch is a signed 16-bit integer, but measured in bytes
    * for linear surfaces and DWords for tiled surfaces.  So the maximum
    * pitch is 32k linear and 128k tiled.
    */
   if (blt_pitch(src_mt) >= 32768 || blt_pitch(dst_mt) >= 32768) {
      perf_debug("Falling back due to >= 32k/128k pitch\n");
      return false;
   }

   /* We need to split the blit into chunks that each fit within the blitter's
    * restrictions.  We can't use a chunk size of 32768 because we need to
    * ensure that src_tile_x + chunk_size fits.  We choose 16384 because it's
    * a nice round power of two, big enough that performance won't suffer, and
    * small enough to guarantee everything fits.
    */
   const uint32_t max_chunk_size = 16384;

   for (uint32_t chunk_x = 0; chunk_x < width; chunk_x += max_chunk_size) {
      for (uint32_t chunk_y = 0; chunk_y < height; chunk_y += max_chunk_size) {
         const uint32_t chunk_w = MIN2(max_chunk_size, width - chunk_x);
         const uint32_t chunk_h = MIN2(max_chunk_size, height - chunk_y);

         uint32_t src_offset, src_tile_x, src_tile_y;
         get_blit_intratile_offset_el(brw, src_mt,
                                      src_x + chunk_x, src_y + chunk_y,
                                      &src_offset, &src_tile_x, &src_tile_y);

         uint32_t dst_offset, dst_tile_x, dst_tile_y;
         get_blit_intratile_offset_el(brw, dst_mt,
                                      dst_x + chunk_x, dst_y + chunk_y,
                                      &dst_offset, &dst_tile_x, &dst_tile_y);

         if (!intelEmitCopyBlit(brw,
                                src_mt->cpp,
                                reverse ? -src_mt->pitch : src_mt->pitch,
                                src_mt->bo, src_mt->offset + src_offset,
                                src_mt->tiling,
                                dst_mt->pitch,
                                dst_mt->bo, dst_mt->offset + dst_offset,
                                dst_mt->tiling,
                                src_tile_x, src_tile_y,
                                dst_tile_x, dst_tile_y,
                                chunk_w, chunk_h,
                                logicop)) {
            /* If this is ever going to fail, it will fail on the first chunk */
            assert(chunk_x == 0 && chunk_y == 0);
            return false;
         }
      }
   }

   return true;
}
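
/* Worked example (illustrative only): a 40000 x 100 pixel blit is emitted as
 * three chunks along x (chunk_x = 0, 16384 and 32768, with widths 16384,
 * 16384 and 7232), each of which stays within the blitter's signed 16-bit
 * coordinate range.
 */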

/**
 * Implements a rectangular block transfer (blit) of pixels between two
 * miptrees.
 *
 * Our blitter can operate on 1, 2, or 4-byte-per-pixel data, with generous,
 * but limited, pitches and sizes allowed.
 *
 * The src/dst coordinates are relative to the given level/slice of the
 * miptree.
 *
 * If @src_flip or @dst_flip is set, then the rectangle within that miptree
 * will be inverted (including scanline order) when copying.  This is common
 * in GL when copying between window system and user-created
 * renderbuffers/textures.
 */
bool
intel_miptree_blit(struct brw_context *brw,
                   struct intel_mipmap_tree *src_mt,
                   int src_level, int src_slice,
                   uint32_t src_x, uint32_t src_y, bool src_flip,
                   struct intel_mipmap_tree *dst_mt,
                   int dst_level, int dst_slice,
                   uint32_t dst_x, uint32_t dst_y, bool dst_flip,
                   uint32_t width, uint32_t height,
                   GLenum logicop)
{
   /* The blitter doesn't understand multisampling at all. */
   if (src_mt->num_samples > 1 || dst_mt->num_samples > 1)
      return false;

   /* No sRGB decode or encode is done by the hardware blitter, which is
    * consistent with what we want in many callers (glCopyTexSubImage(),
    * texture validation, etc.).
    */
   mesa_format src_format = _mesa_get_srgb_format_linear(src_mt->format);
   mesa_format dst_format = _mesa_get_srgb_format_linear(dst_mt->format);

   /* The blitter doesn't support doing any format conversions.  We do,
    * however, also support blitting ARGB8888 to XRGB8888 (trivial, the
    * values dropped into the X channel don't matter), and XRGB8888 to
    * ARGB8888 by setting the A channel to 1.0 at the end.
    */
   if (!intel_miptree_blit_compatible_formats(src_format, dst_format)) {
      perf_debug("%s: Can't use hardware blitter from %s to %s, "
                 "falling back.\n", __func__,
                 _mesa_get_format_name(src_format),
                 _mesa_get_format_name(dst_format));
      return false;
   }

   /* The blitter has no idea about HiZ or fast color clears, so we need to
    * resolve the miptrees before we do anything.
    */
   intel_miptree_access_raw(brw, src_mt, src_level, src_slice, false);
   intel_miptree_access_raw(brw, dst_mt, dst_level, dst_slice, true);

   if (src_flip)
      src_y = minify(src_mt->physical_height0, src_level - src_mt->first_level) - src_y - height;

   if (dst_flip)
      dst_y = minify(dst_mt->physical_height0, dst_level - dst_mt->first_level) - dst_y - height;

   uint32_t src_image_x, src_image_y, dst_image_x, dst_image_y;
   intel_miptree_get_image_offset(src_mt, src_level, src_slice,
                                  &src_image_x, &src_image_y);
   intel_miptree_get_image_offset(dst_mt, dst_level, dst_slice,
                                  &dst_image_x, &dst_image_y);
   src_x += src_image_x;
   src_y += src_image_y;
   dst_x += dst_image_x;
   dst_y += dst_image_y;

   if (!emit_miptree_blit(brw, src_mt, src_x, src_y,
                          dst_mt, dst_x, dst_y, width, height,
                          src_flip != dst_flip, logicop)) {
      return false;
   }

   /* XXX This could be done in a single pass using XY_FULL_MONO_PATTERN_BLT */
   if (_mesa_get_format_bits(src_format, GL_ALPHA_BITS) == 0 &&
       _mesa_get_format_bits(dst_format, GL_ALPHA_BITS) > 0) {
      intel_miptree_set_alpha_to_one(brw, dst_mt,
                                     dst_x, dst_y,
                                     width, height);
   }

   return true;
}
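
/* Example (illustrative only): copying an XRGB8888 renderbuffer into an
 * ARGB8888 texture first runs emit_miptree_blit(), which copies all four
 * bytes per pixel (whatever sat in the X byte lands in A), and then
 * intel_miptree_set_alpha_to_one() rewrites the destination rectangle's
 * alpha to 0xff so the texture samples as A == 1.0.
 */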

bool
intel_miptree_copy(struct brw_context *brw,
                   struct intel_mipmap_tree *src_mt,
                   int src_level, int src_slice,
                   uint32_t src_x, uint32_t src_y,
                   struct intel_mipmap_tree *dst_mt,
                   int dst_level, int dst_slice,
                   uint32_t dst_x, uint32_t dst_y,
                   uint32_t src_width, uint32_t src_height)
{
   /* The blitter doesn't understand multisampling at all. */
   if (src_mt->num_samples > 1 || dst_mt->num_samples > 1)
      return false;

   if (src_mt->format == MESA_FORMAT_S_UINT8)
      return false;

   /* The blitter has no idea about HiZ or fast color clears, so we need to
    * resolve the miptrees before we do anything.
    */
   intel_miptree_access_raw(brw, src_mt, src_level, src_slice, false);
   intel_miptree_access_raw(brw, dst_mt, dst_level, dst_slice, true);

   uint32_t src_image_x, src_image_y;
   intel_miptree_get_image_offset(src_mt, src_level, src_slice,
                                  &src_image_x, &src_image_y);

   if (_mesa_is_format_compressed(src_mt->format)) {
      GLuint bw, bh;
      _mesa_get_format_block_size(src_mt->format, &bw, &bh);

      /* Compressed textures need not have dimensions that are a multiple of
       * the block size.  Rectangles in compressed textures do need to be a
       * multiple of the block size.  The one exception is that the right and
       * bottom edges may be at the right or bottom edge of the miplevel even
       * if it's not aligned.
       */
      assert(src_x % bw == 0);
      assert(src_y % bh == 0);
      assert(src_width % bw == 0 ||
             src_x + src_width == minify(src_mt->logical_width0, src_level));
      assert(src_height % bh == 0 ||
             src_y + src_height == minify(src_mt->logical_height0, src_level));

      src_x /= (int)bw;
      src_y /= (int)bh;
      src_width = DIV_ROUND_UP(src_width, (int)bw);
      src_height = DIV_ROUND_UP(src_height, (int)bh);
   }
   src_x += src_image_x;
   src_y += src_image_y;

   uint32_t dst_image_x, dst_image_y;
   intel_miptree_get_image_offset(dst_mt, dst_level, dst_slice,
                                  &dst_image_x, &dst_image_y);

   if (_mesa_is_format_compressed(dst_mt->format)) {
      GLuint bw, bh;
      _mesa_get_format_block_size(dst_mt->format, &bw, &bh);

      assert(dst_x % bw == 0);
      assert(dst_y % bh == 0);

      dst_x /= (int)bw;
      dst_y /= (int)bh;
   }
   dst_x += dst_image_x;
   dst_y += dst_image_y;

   return emit_miptree_blit(brw, src_mt, src_x, src_y,
                            dst_mt, dst_x, dst_y,
                            src_width, src_height, false, GL_COPY);
}
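
/* Worked example (illustrative only): copying a full 70x70 BC1-compressed
 * miplevel (4x4 blocks) passes the edge-alignment asserts because 70 is the
 * level's width and height, and the copy is rewritten in block units as
 * DIV_ROUND_UP(70, 4) == 18, i.e. an 18x18 blit where each element is one
 * compressed block.
 */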

static bool
alignment_valid(struct brw_context *brw, unsigned offset, uint32_t tiling)
{
   /* Tiled buffers must be page-aligned (4K). */
   if (tiling != I915_TILING_NONE)
      return (offset & 4095) == 0;

   /* On Gen8+, linear buffers must be cacheline-aligned. */
   if (brw->gen >= 8)
      return (offset & 63) == 0;

   return true;
}
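
/* Example (illustrative only): on Gen8+ a linear destination at byte offset
 * 0x20 within its BO is rejected here (not 64-byte aligned), while a tiled
 * destination must sit on a 4K page boundary regardless of generation.
 */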

static uint32_t
xy_blit_cmd(uint32_t src_tiling, uint32_t dst_tiling, uint32_t cpp)
{
   uint32_t CMD = 0;

   assert(cpp <= 4);
   switch (cpp) {
   case 1:
   case 2:
      CMD = XY_SRC_COPY_BLT_CMD;
      break;
   case 4:
      CMD = XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
      break;
   default:
      unreachable("not reached");
   }

   if (dst_tiling != I915_TILING_NONE)
      CMD |= XY_DST_TILED;

   if (src_tiling != I915_TILING_NONE)
      CMD |= XY_SRC_TILED;

   return CMD;
}

/* Copy BitBlt
 */
bool
intelEmitCopyBlit(struct brw_context *brw,
                  GLuint cpp,
                  int32_t src_pitch,
                  struct brw_bo *src_buffer,
                  GLuint src_offset,
                  uint32_t src_tiling,
                  int32_t dst_pitch,
                  struct brw_bo *dst_buffer,
                  GLuint dst_offset,
                  uint32_t dst_tiling,
                  GLshort src_x, GLshort src_y,
                  GLshort dst_x, GLshort dst_y,
                  GLshort w, GLshort h,
                  GLenum logic_op)
{
   GLuint CMD, BR13;
   int dst_y2 = dst_y + h;
   int dst_x2 = dst_x + w;
   bool dst_y_tiled = dst_tiling == I915_TILING_Y;
   bool src_y_tiled = src_tiling == I915_TILING_Y;
   uint32_t src_tile_w, src_tile_h;
   uint32_t dst_tile_w, dst_tile_h;

   if ((dst_y_tiled || src_y_tiled) && brw->gen < 6)
      return false;

   const unsigned bo_sizes = dst_buffer->size + src_buffer->size;

   /* do space check before going any further */
   if (!brw_batch_has_aperture_space(brw, bo_sizes))
      intel_batchbuffer_flush(brw);

   if (!brw_batch_has_aperture_space(brw, bo_sizes))
      return false;

   unsigned length = brw->gen >= 8 ? 10 : 8;

   intel_batchbuffer_require_space(brw, length * 4, BLT_RING);
   DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
       __func__,
       src_buffer, src_pitch, src_offset, src_x, src_y,
       dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);

   intel_get_tile_dims(src_tiling, cpp, &src_tile_w, &src_tile_h);
   intel_get_tile_dims(dst_tiling, cpp, &dst_tile_w, &dst_tile_h);

   /* For Tiled surfaces, the pitch has to be a multiple of the Tile width
    * (X direction width of the Tile).  This is ensured while allocating the
    * buffer object.
    */
   assert(src_tiling == I915_TILING_NONE || (src_pitch % src_tile_w) == 0);
   assert(dst_tiling == I915_TILING_NONE || (dst_pitch % dst_tile_w) == 0);

   /* For big formats (such as floating point), do the copy using 16 or
    * 32bpp and multiply the coordinates.
    */
   if (cpp > 4) {
      if (cpp % 4 == 2) {
         dst_x *= cpp / 2;
         dst_x2 *= cpp / 2;
         src_x *= cpp / 2;
         cpp = 2;
      } else {
         assert(cpp % 4 == 0);
         dst_x *= cpp / 4;
         dst_x2 *= cpp / 4;
         src_x *= cpp / 4;
         cpp = 4;
      }
   }

   if (!alignment_valid(brw, dst_offset, dst_tiling))
      return false;
   if (!alignment_valid(brw, src_offset, src_tiling))
      return false;

   /* Blit pitch must be dword-aligned.  Otherwise, the hardware appears to drop
    * the low bits.  Offsets must be naturally aligned.
    */
   if (src_pitch % 4 != 0 || src_offset % cpp != 0 ||
       dst_pitch % 4 != 0 || dst_offset % cpp != 0)
      return false;

   assert(cpp <= 4);
   BR13 = br13_for_cpp(cpp) | translate_raster_op(logic_op) << 16;

   CMD = xy_blit_cmd(src_tiling, dst_tiling, cpp);

   /* For tiled source and destination, pitch value should be specified
    * as a number of Dwords.
    */
   if (dst_tiling != I915_TILING_NONE)
      dst_pitch /= 4;

   if (src_tiling != I915_TILING_NONE)
      src_pitch /= 4;

   if (dst_y2 <= dst_y || dst_x2 <= dst_x)
      return true;

   assert(dst_x < dst_x2);
   assert(dst_y < dst_y2);

   BEGIN_BATCH_BLT_TILED(length, dst_y_tiled, src_y_tiled);
   OUT_BATCH(CMD | (length - 2));
   OUT_BATCH(BR13 | (uint16_t)dst_pitch);
   OUT_BATCH(SET_FIELD(dst_y, BLT_Y) | SET_FIELD(dst_x, BLT_X));
   OUT_BATCH(SET_FIELD(dst_y2, BLT_Y) | SET_FIELD(dst_x2, BLT_X));
   if (brw->gen >= 8) {
      OUT_RELOC64(dst_buffer,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                  dst_offset);
   } else {
      OUT_RELOC(dst_buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                dst_offset);
   }
   OUT_BATCH(SET_FIELD(src_y, BLT_Y) | SET_FIELD(src_x, BLT_X));
   OUT_BATCH((uint16_t)src_pitch);
   if (brw->gen >= 8) {
      OUT_RELOC64(src_buffer,
                  I915_GEM_DOMAIN_RENDER, 0,
                  src_offset);
   } else {
      OUT_RELOC(src_buffer,
                I915_GEM_DOMAIN_RENDER, 0,
                src_offset);
   }

   ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled);

   brw_emit_mi_flush(brw);

   return true;
}
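
/* Worked example (illustrative only): a copy between RGBA32F surfaces has
 * cpp == 16, so the cpp > 4 path above re-expresses it as a 32bpp blit --
 * the x coordinates (and hence the width) are multiplied by 4 and cpp
 * becomes 4 -- before BR13 and CMD are computed.  The bytes are copied
 * unchanged either way.
 */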

bool
intelEmitImmediateColorExpandBlit(struct brw_context *brw,
                                  GLuint cpp,
                                  GLubyte *src_bits, GLuint src_size,
                                  GLuint fg_color,
                                  GLshort dst_pitch,
                                  struct brw_bo *dst_buffer,
                                  GLuint dst_offset,
                                  uint32_t dst_tiling,
                                  GLshort x, GLshort y,
                                  GLshort w, GLshort h,
                                  GLenum logic_op)
{
   int dwords = ALIGN(src_size, 8) / 4;
   uint32_t opcode, br13, blit_cmd;

   if (dst_tiling != I915_TILING_NONE) {
      if (dst_offset & 4095)
         return false;
      if (dst_tiling == I915_TILING_Y)
         return false;
   }

   assert((logic_op >= GL_CLEAR) && (logic_op <= (GL_CLEAR + 0x0f)));
   assert(dst_pitch > 0);

   if (w < 0 || h < 0)
      return true;

   DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d, %d bytes %d dwords\n",
       __func__,
       dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);

   unsigned xy_setup_blt_length = brw->gen >= 8 ? 10 : 8;
   intel_batchbuffer_require_space(brw, (xy_setup_blt_length * 4) +
                                        (3 * 4) + dwords * 4, BLT_RING);

   opcode = XY_SETUP_BLT_CMD;
   if (cpp == 4)
      opcode |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
   if (dst_tiling != I915_TILING_NONE) {
      opcode |= XY_DST_TILED;
      dst_pitch /= 4;
   }

   br13 = dst_pitch | (translate_raster_op(logic_op) << 16) | (1 << 29);
   br13 |= br13_for_cpp(cpp);

   blit_cmd = XY_TEXT_IMMEDIATE_BLIT_CMD | XY_TEXT_BYTE_PACKED; /* packing? */
   if (dst_tiling != I915_TILING_NONE)
      blit_cmd |= XY_DST_TILED;

   BEGIN_BATCH_BLT(xy_setup_blt_length + 3);
   OUT_BATCH(opcode | (xy_setup_blt_length - 2));
   OUT_BATCH(br13);
   OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
   OUT_BATCH((100 << 16) | 100); /* clip x2, y2 */
   if (brw->gen >= 8) {
      OUT_RELOC64(dst_buffer,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                  dst_offset);
   } else {
      OUT_RELOC(dst_buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                dst_offset);
   }
   OUT_BATCH(0); /* bg */
   OUT_BATCH(fg_color); /* fg */
   OUT_BATCH(0); /* pattern base addr */
   if (brw->gen >= 8)
      OUT_BATCH(0);

   OUT_BATCH(blit_cmd | ((3 - 2) + dwords));
   OUT_BATCH(SET_FIELD(y, BLT_Y) | SET_FIELD(x, BLT_X));
   OUT_BATCH(SET_FIELD(y + h, BLT_Y) | SET_FIELD(x + w, BLT_X));
   ADVANCE_BATCH();

   intel_batchbuffer_data(brw, src_bits, dwords * 4, BLT_RING);

   brw_emit_mi_flush(brw);

   return true;
}
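
/* Example (illustrative only): a 10-byte monochrome bitmap source occupies
 * ALIGN(10, 8) / 4 == 4 immediate dwords in the batch; every set bit is
 * expanded to fg_color at the corresponding destination pixel, using the
 * state programmed by the XY_SETUP_BLT above.
 */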

/* We don't have a memmove-type blit like some other hardware, so we'll do a
 * rectangular blit covering a large chunk of the buffer, then emit a final
 * 1-scanline blit at the end to cover the remainder if needed.
 */
void
intel_emit_linear_blit(struct brw_context *brw,
                       struct brw_bo *dst_bo,
                       unsigned int dst_offset,
                       struct brw_bo *src_bo,
                       unsigned int src_offset,
                       unsigned int size)
{
   struct gl_context *ctx = &brw->ctx;
   GLuint pitch, height;
   int16_t src_x, dst_x;
   bool ok;

   do {
      /* The pitch given to the GPU must be DWORD aligned, and
       * we want width to match pitch.  Max width is (1 << 15 - 1),
       * rounding that down to the nearest DWORD is 1 << 15 - 4
       */
      pitch = ROUND_DOWN_TO(MIN2(size, (1 << 15) - 64), 4);
      height = (size < pitch || pitch == 0) ? 1 : size / pitch;

      src_x = src_offset % 64;
      dst_x = dst_offset % 64;
      pitch = ALIGN(MIN2(size, (1 << 15) - 64), 4);
      assert(src_x + pitch < 1 << 15);
      assert(dst_x + pitch < 1 << 15);

      ok = intelEmitCopyBlit(brw, 1,
                             pitch, src_bo, src_offset - src_x, I915_TILING_NONE,
                             pitch, dst_bo, dst_offset - dst_x, I915_TILING_NONE,
                             src_x, 0, /* src x/y */
                             dst_x, 0, /* dst x/y */
                             MIN2(size, pitch), height, /* w, h */
                             GL_COPY);
      if (!ok) {
         _mesa_problem(ctx, "Failed to linear blit %dx%d\n",
                       MIN2(size, pitch), height);
         return;
      }

      pitch *= height;
      if (size <= pitch)
         return;

      src_offset += pitch;
      dst_offset += pitch;
      size -= pitch;
   } while (1);
}
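
/* Worked example (illustrative numbers): a 1,000,000-byte copy with 64-byte
 * aligned offsets first issues a 32704-wide, 30-row blit (981,120 bytes),
 * then loops and finishes the remaining 18,880 bytes as a single-row blit.
 */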

/**
 * Used to initialize the alpha value of an ARGB8888 miptree after copying
 * into it from an XRGB8888 source.
 *
 * This is very common with glCopyTexImage2D().  Note that the coordinates are
 * relative to the start of the miptree, not relative to a slice within the
 * miptree.
 */
static void
intel_miptree_set_alpha_to_one(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               int x, int y, int width, int height)
{
   uint32_t BR13, CMD;
   int pitch, cpp;

   pitch = mt->pitch;
   cpp = mt->cpp;

   DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n",
       __func__, mt->bo, pitch, x, y, width, height);

   BR13 = br13_for_cpp(cpp) | 0xf0 << 16;
   CMD = XY_COLOR_BLT_CMD;
   CMD |= XY_BLT_WRITE_ALPHA;

   if (mt->tiling != I915_TILING_NONE) {
      CMD |= XY_DST_TILED;
      pitch /= 4;
   }
   BR13 |= pitch;

   /* do space check before going any further */
   if (!brw_batch_has_aperture_space(brw, mt->bo->size))
      intel_batchbuffer_flush(brw);

   unsigned length = brw->gen >= 8 ? 7 : 6;
   bool dst_y_tiled = mt->tiling == I915_TILING_Y;

   /* We need to split the blit into chunks that each fit within the blitter's
    * restrictions.  We can't use a chunk size of 32768 because we need to
    * ensure that src_tile_x + chunk_size fits.  We choose 16384 because it's
    * a nice round power of two, big enough that performance won't suffer, and
    * small enough to guarantee everything fits.
    */
   const uint32_t max_chunk_size = 16384;

   for (uint32_t chunk_x = 0; chunk_x < width; chunk_x += max_chunk_size) {
      for (uint32_t chunk_y = 0; chunk_y < height; chunk_y += max_chunk_size) {
         const uint32_t chunk_w = MIN2(max_chunk_size, width - chunk_x);
         const uint32_t chunk_h = MIN2(max_chunk_size, height - chunk_y);

         uint32_t offset, tile_x, tile_y;
         get_blit_intratile_offset_el(brw, mt,
                                      x + chunk_x, y + chunk_y,
                                      &offset, &tile_x, &tile_y);

         BEGIN_BATCH_BLT_TILED(length, dst_y_tiled, false);
         OUT_BATCH(CMD | (length - 2));
         OUT_BATCH(BR13);
         OUT_BATCH(SET_FIELD(y + chunk_y, BLT_Y) |
                   SET_FIELD(x + chunk_x, BLT_X));
         OUT_BATCH(SET_FIELD(y + chunk_y + chunk_h, BLT_Y) |
                   SET_FIELD(x + chunk_x + chunk_w, BLT_X));
         if (brw->gen >= 8) {
            OUT_RELOC64(mt->bo,
                        I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                        offset);
         } else {
            OUT_RELOC(mt->bo,
                      I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                      offset);
         }
         OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
         ADVANCE_BATCH_TILED(dst_y_tiled, false);
      }
   }

   brw_emit_mi_flush(brw);
}