/* mesa.git: src/gallium/drivers/r600/evergreen_hw_context.c
 * (the web page this was captured from also showed the unrelated commit
 * title "ilo: move internal shader interface to a new header") */
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jerome Glisse
25 */
26 #include "r600_pipe.h"
27 #include "evergreend.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30
31 void evergreen_flush_vgt_streamout(struct r600_context *ctx)
32 {
33 struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
34
35 r600_write_config_reg(cs, R_0084FC_CP_STRMOUT_CNTL, 0);
36
37 cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
38 cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0);
39
40 cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0);
41 cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */
42 cs->buf[cs->cdw++] = R_0084FC_CP_STRMOUT_CNTL >> 2; /* register */
43 cs->buf[cs->cdw++] = 0;
44 cs->buf[cs->cdw++] = S_0084FC_OFFSET_UPDATE_DONE(1); /* reference value */
45 cs->buf[cs->cdw++] = S_0084FC_OFFSET_UPDATE_DONE(1); /* mask */
46 cs->buf[cs->cdw++] = 4; /* poll interval */
47 }
48
49 void evergreen_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit)
50 {
51 struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
52
53 if (buffer_enable_bit) {
54 r600_write_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
55 r600_write_value(cs, S_028B94_STREAMOUT_0_EN(1)); /* R_028B94_VGT_STRMOUT_CONFIG */
56 r600_write_value(cs, S_028B98_STREAM_0_BUFFER_EN(buffer_enable_bit)); /* R_028B98_VGT_STRMOUT_BUFFER_CONFIG */
57 } else {
58 r600_write_context_reg(cs, R_028B94_VGT_STRMOUT_CONFIG, S_028B94_STREAMOUT_0_EN(0));
59 }
60 }
61
62 void evergreen_dma_copy(struct r600_context *rctx,
63 struct pipe_resource *dst,
64 struct pipe_resource *src,
65 uint64_t dst_offset,
66 uint64_t src_offset,
67 uint64_t size)
68 {
69 struct radeon_winsys_cs *cs = rctx->rings.dma.cs;
70 unsigned i, ncopy, csize, sub_cmd, shift;
71 struct r600_resource *rdst = (struct r600_resource*)dst;
72 struct r600_resource *rsrc = (struct r600_resource*)src;
73
74 /* make sure that the dma ring is only one active */
75 rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
76 dst_offset += r600_resource_va(&rctx->screen->screen, dst);
77 src_offset += r600_resource_va(&rctx->screen->screen, src);
78
79 /* see if we use dword or byte copy */
80 if (!(dst_offset & 0x3) && !(src_offset & 0x3) && !(size & 0x3)) {
81 size >>= 2;
82 sub_cmd = 0x00;
83 shift = 2;
84 } else {
85 sub_cmd = 0x40;
86 shift = 0;
87 }
88 ncopy = (size / 0x000fffff) + !!(size % 0x000fffff);
89
90 r600_need_dma_space(rctx, ncopy * 5);
91 for (i = 0; i < ncopy; i++) {
92 csize = size < 0x000fffff ? size : 0x000fffff;
93 /* emit reloc before writting cs so that cs is always in consistent state */
94 r600_context_bo_reloc(rctx, &rctx->rings.dma, rsrc, RADEON_USAGE_READ);
95 r600_context_bo_reloc(rctx, &rctx->rings.dma, rdst, RADEON_USAGE_WRITE);
96 cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, sub_cmd, csize);
97 cs->buf[cs->cdw++] = dst_offset & 0xffffffff;
98 cs->buf[cs->cdw++] = src_offset & 0xffffffff;
99 cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
100 cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
101 dst_offset += csize << shift;
102 src_offset += csize << shift;
103 size -= csize;
104 }
105
106 util_range_add(&rdst->valid_buffer_range, dst_offset,
107 dst_offset + size);
108 }
109
/* The max number of bytes to copy per packet.
 * NOTE(review): the CP DMA BYTE_COUNT field is 21 bits wide; the -8 appears
 * to keep each chunk 8-byte aligned — confirm against the CP packet spec. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
112
113 void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
114 struct pipe_resource *dst, uint64_t offset,
115 unsigned size, uint32_t clear_value)
116 {
117 struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
118
119 assert(size);
120 assert(rctx->screen->has_cp_dma);
121
122 offset += r600_resource_va(&rctx->screen->screen, dst);
123
124 /* We flush the caches, because we might read from or write
125 * to resources which are bound right now. */
126 rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES |
127 R600_CONTEXT_FLUSH_AND_INV |
128 R600_CONTEXT_FLUSH_AND_INV_CB_META |
129 R600_CONTEXT_FLUSH_AND_INV_DB_META |
130 R600_CONTEXT_STREAMOUT_FLUSH |
131 R600_CONTEXT_WAIT_3D_IDLE;
132
133 while (size) {
134 unsigned sync = 0;
135 unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
136 unsigned reloc;
137
138 r600_need_cs_space(rctx, 10 + (rctx->flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);
139
140 /* Flush the caches for the first copy only. */
141 if (rctx->flags) {
142 r600_flush_emit(rctx);
143 }
144
145 /* Do the synchronization after the last copy, so that all data is written to memory. */
146 if (size == byte_count) {
147 sync = PKT3_CP_DMA_CP_SYNC;
148 }
149
150 /* This must be done after r600_need_cs_space. */
151 reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx,
152 (struct r600_resource*)dst, RADEON_USAGE_WRITE);
153
154 r600_write_value(cs, PKT3(PKT3_CP_DMA, 4, 0));
155 r600_write_value(cs, clear_value); /* DATA [31:0] */
156 r600_write_value(cs, sync | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
157 r600_write_value(cs, offset); /* DST_ADDR_LO [31:0] */
158 r600_write_value(cs, (offset >> 32) & 0xff); /* DST_ADDR_HI [7:0] */
159 r600_write_value(cs, byte_count); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
160
161 r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
162 r600_write_value(cs, reloc);
163
164 size -= byte_count;
165 offset += byte_count;
166 }
167
168 /* Invalidate the read caches. */
169 rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
170
171 util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
172 offset + size);
173 }
174