2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
33 #include <util/u_double_list.h>
34 #include <util/u_inlines.h>
35 #include "util/u_hash_table.h"
36 #include <os/os_thread.h>
47 enum chip_class chip_class
;
48 struct r600_tiling_info tiling_info
;
49 struct r600_bomgr
*bomgr
;
52 struct r600_bo
*fence_bo
;
53 unsigned clock_crystal_freq
;
54 unsigned num_backends
;
55 unsigned minor_version
;
57 /* List of buffer handles and its mutex. */
58 struct util_hash_table
*bo_handles
;
59 pipe_mutex bo_handles_mutex
;
/* Per-register flags stored in the r600_reg tables.
 * NOTE(review): meanings inferred from names and from the relocation walk in
 * r600_context_block_emit_dirty() — confirm against the table definitions. */
#define REG_FLAG_NEED_BO 1		/* register value is a BO address and needs a relocation entry */
#define REG_FLAG_DIRTY_ALWAYS 2	/* presumably: re-mark dirty on every emit — TODO confirm */
75 struct pipe_reference reference
;
81 struct list_head fencedlist
;
83 struct r600_context
*ctx
;
85 struct r600_reloc
*reloc
;
92 struct pipe_reference reference
;
94 unsigned tiling_flags
;
95 unsigned kernel_pitch
;
100 struct list_head list
;
109 struct radeon
*radeon
;
112 struct list_head delayed
;
113 unsigned num_delayed
;
/*
 * Winsys construction / teardown.
 */

/* Create the winsys object for an opened DRM file descriptor.
 * fd:     DRM device fd (ownership not shown here — confirm who closes it).
 * device: PCI device id used to derive the chip family.
 * Returns NULL on failure — presumably; verify against the implementation. */
struct radeon *r600_new(int fd, unsigned device);

/* Destroy a winsys object created by r600_new(). */
void r600_delete(struct radeon *r600);

/* Map a PCI device id to a radeon family value.
 * NOTE(review): behavior for unknown ids is not visible here — confirm. */
unsigned radeon_family_from_device(unsigned device);
/*
 * Low-level buffer-object helpers.
 * NOTE(review): the int-returning functions below presumably use the
 * 0-on-success / negative-on-error convention — confirm in the .c file.
 */

/* Obtain a buffer object: with a non-zero handle this presumably opens an
 * existing kernel BO, otherwise allocates size bytes at the given alignment
 * — TODO confirm against the implementation. */
struct radeon_bo *radeon_bo(struct radeon *radeon, unsigned handle,
			    unsigned size, unsigned alignment);

/* Reference-counted pointer assignment: make *dst point at src, dropping the
 * reference previously held by *dst (src may be NULL). */
void radeon_bo_reference(struct radeon *radeon, struct radeon_bo **dst,
			 struct radeon_bo *src);

/* Block until the GPU is done with bo. */
int radeon_bo_wait(struct radeon *radeon, struct radeon_bo *bo);

/* Non-blocking busy query; on busy, *domain presumably receives the memory
 * domain the BO currently lives in — verify against callers. */
int radeon_bo_busy(struct radeon *radeon, struct radeon_bo *bo, uint32_t *domain);

/* Associate a list of num_bo buffer objects with the current fence. */
int radeon_bo_fencelist(struct radeon *radeon, struct radeon_bo **bolist, uint32_t num_bo);
137 int radeon_bo_get_tiling_flags(struct radeon
*radeon
,
138 struct radeon_bo
*bo
,
139 uint32_t *tiling_flags
,
141 int radeon_bo_get_name(struct radeon
*radeon
,
142 struct radeon_bo
*bo
,
/*
 * Command-stream context helpers.
 */

/* Set up the context's fencing state (allocates fence_bo — TODO confirm). */
int r600_context_init_fence(struct r600_context *ctx);

/* Record a relocation for rbo at the pm4 dword pointed to by pm4.
 * Used when emitting dirty blocks (see r600_context_block_emit_dirty). */
void r600_context_bo_reloc(struct r600_context *ctx, u32 *pm4, struct r600_bo *rbo);

/* Emit the flush described by flush_flags/flush_mask for rbo into the
 * command stream. */
void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
			   unsigned flush_mask, struct r600_bo *rbo);

/* Look up the BO currently bound to the register at offset, presumably via
 * the block's reloc table — confirm. */
struct r600_bo *r600_context_reg_bo(struct r600_context *ctx, unsigned offset);

/* Register nreg register descriptors with the context's block/range hash
 * (see CTX_RANGE_ID/CTX_BLOCK_ID). */
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg);
/* Free an r600_bo and its underlying resources. */
void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo);

/*
 * BO manager: caches freed buffers for reuse (see the `delayed` list and
 * `num_delayed` counter in the manager struct).
 */

/* Create a BO manager; usecs is presumably how long a freed BO may sit in
 * the cache before being really destroyed — TODO confirm. */
struct r600_bomgr *r600_bomgr_create(struct radeon *radeon, unsigned usecs);

/* Destroy the manager and any buffers it still holds. */
void r600_bomgr_destroy(struct r600_bomgr *mgr);

/* Give bo back to the manager; the bool presumably tells the caller whether
 * the BO was actually destroyed (vs. cached) — verify against callers. */
bool r600_bomgr_bo_destroy(struct r600_bomgr *mgr, struct r600_bo *bo);

/* Initialize manager-related state inside a freshly created bo. */
void r600_bomgr_bo_init(struct r600_bomgr *mgr, struct r600_bo *bo);
167 struct r600_bo
*r600_bomgr_bo_create(struct r600_bomgr
*mgr
,
/* Split a register offset into a two-level hash index:
 * the high bits (above hash_shift) select one of 256 ranges, and the low
 * hash_shift bits select the block slot inside that range.
 * NOTE(review): (1 << hash_shift) is UB for hash_shift >= 31 — presumably
 * hash_shift is small; confirm where it is set. */
#define CTX_RANGE_ID(ctx, offset) (((offset) >> (ctx)->hash_shift) & 255)
#define CTX_BLOCK_ID(ctx, offset) ((offset) & ((1 << (ctx)->hash_shift) - 1))
179 static void inline r600_context_reg(struct r600_context
*ctx
,
180 unsigned offset
, unsigned value
,
183 struct r600_range
*range
;
184 struct r600_block
*block
;
187 range
= &ctx
->range
[CTX_RANGE_ID(ctx
, offset
)];
188 block
= range
->blocks
[CTX_BLOCK_ID(ctx
, offset
)];
189 id
= (offset
- block
->start_offset
) >> 2;
190 block
->reg
[id
] &= ~mask
;
191 block
->reg
[id
] |= value
;
192 if (!(block
->status
& R600_BLOCK_STATUS_DIRTY
)) {
193 ctx
->pm4_dirty_cdwords
+= block
->pm4_ndwords
;
194 block
->status
|= R600_BLOCK_STATUS_ENABLED
;
195 block
->status
|= R600_BLOCK_STATUS_DIRTY
;
196 LIST_ADDTAIL(&block
->list
,&ctx
->dirty
);
200 static inline void r600_context_dirty_block(struct r600_context
*ctx
, struct r600_block
*block
,
203 if ((dirty
!= (block
->status
& R600_BLOCK_STATUS_DIRTY
)) || !(block
->status
& R600_BLOCK_STATUS_ENABLED
)) {
204 block
->status
|= R600_BLOCK_STATUS_ENABLED
;
205 block
->status
|= R600_BLOCK_STATUS_DIRTY
;
206 ctx
->pm4_dirty_cdwords
+= block
->pm4_ndwords
+ block
->pm4_flush_ndwords
;
207 LIST_ADDTAIL(&block
->list
,&ctx
->dirty
);
211 static inline void r600_context_block_emit_dirty(struct r600_context
*ctx
, struct r600_block
*block
)
215 for (int j
= 0; j
< block
->nreg
; j
++) {
216 if (block
->pm4_bo_index
[j
]) {
217 /* find relocation */
218 id
= block
->pm4_bo_index
[j
];
219 r600_context_bo_reloc(ctx
,
220 &block
->pm4
[block
->reloc
[id
].bo_pm4_index
],
221 block
->reloc
[id
].bo
);
222 r600_context_bo_flush(ctx
,
223 block
->reloc
[id
].flush_flags
,
224 block
->reloc
[id
].flush_mask
,
225 block
->reloc
[id
].bo
);
228 memcpy(&ctx
->pm4
[ctx
->pm4_cdwords
], block
->pm4
, block
->pm4_ndwords
* 4);
229 ctx
->pm4_cdwords
+= block
->pm4_ndwords
;
230 block
->status
^= R600_BLOCK_STATUS_DIRTY
;
231 LIST_DELINIT(&block
->list
);
237 static inline int radeon_bo_map(struct radeon
*radeon
, struct radeon_bo
*bo
)
243 static inline void radeon_bo_unmap(struct radeon
*radeon
, struct radeon_bo
*bo
)
246 assert(bo
->map_count
>= 0);
252 static inline struct radeon_bo
*r600_bo_get_bo(struct r600_bo
*bo
)
257 static unsigned inline r600_bo_get_handle(struct r600_bo
*bo
)
259 return bo
->bo
->handle
;
262 static unsigned inline r600_bo_get_size(struct r600_bo
*bo
)
270 static inline bool fence_is_after(unsigned fence
, unsigned ofence
)
272 /* handle wrap around */
273 if (fence
< 0x80000000 && ofence
> 0x80000000)