} gp;
struct {
unsigned numColourResults;
- boolean writesDepth;
- boolean earlyFragTests;
- boolean separateFragData;
- boolean usesDiscard;
+ bool writesDepth;
+ bool earlyFragTests;
+ bool separateFragData;
+ bool usesDiscard;
} fp;
struct {
uint32_t inputOffset; /* base address for user args */
int8_t viewportId; /* output index of ViewportIndex */
uint8_t fragDepth; /* output index of FragDepth */
uint8_t sampleMask; /* output index of SampleMask */
- boolean sampleInterp; /* perform sample interp on all fp inputs */
+ bool sampleInterp; /* perform sample interp on all fp inputs */
uint8_t backFaceColor[2]; /* input/output indices of back face colour */
uint8_t globalAccess; /* 1 for read, 2 for wr, 3 for rw */
- boolean fp64; /* program uses fp64 math */
- boolean nv50styleSurfaces; /* generate gX[] access for raw buffers */
+ bool fp64; /* program uses fp64 math */
+ bool nv50styleSurfaces; /* generate gX[] access for raw buffers */
uint8_t resInfoCBSlot; /* cX[] used for tex handles, surface info */
uint16_t texBindBase; /* base address for tex handles (nve4) */
uint16_t suInfoBase; /* base address for surface info (nve4) */
if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
tgsi_dump(tokens, 0);
- mainTempsInLMem = FALSE;
+ mainTempsInLMem = false;
}
Source::~Source()
info->prop.gp.instanceCount = prop->u[0].Data;
break;
case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
- info->prop.fp.separateFragData = TRUE;
+ info->prop.fp.separateFragData = true;
break;
case TGSI_PROPERTY_FS_COORD_ORIGIN:
case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER:
} else
if (insn.getDst(0).getFile() == TGSI_FILE_TEMPORARY) {
if (insn.getDst(0).isIndirect(0))
- mainTempsInLMem = TRUE;
+ mainTempsInLMem = true;
}
}
Instruction::SrcRegister src = insn.getSrc(s);
if (src.getFile() == TGSI_FILE_TEMPORARY) {
if (src.isIndirect(0))
- mainTempsInLMem = TRUE;
+ mainTempsInLMem = true;
} else
if (src.getFile() == TGSI_FILE_RESOURCE) {
if (src.getIndex(0) == TGSI_RESOURCE_GLOBAL)
i->getSrc(0), i->getSrc(1));
i->setSrc(0, tmp);
i->setSrc(1, NULL);
- return TRUE;
+ return true;
}
//
return (struct nouveau_transfer *)transfer;
}
-static INLINE boolean
+static INLINE bool
nouveau_buffer_malloc(struct nv04_resource *buf)
{
if (!buf->data)
return !!buf->data;
}
-static INLINE boolean
+static INLINE bool
nouveau_buffer_allocate(struct nouveau_screen *screen,
struct nv04_resource *buf, unsigned domain)
{
buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
&buf->bo, &buf->offset);
if (!buf->bo)
- return FALSE;
+ return false;
NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
} else {
assert(domain == 0);
if (!nouveau_buffer_malloc(buf))
- return FALSE;
+ return false;
}
buf->domain = domain;
if (buf->bo)
util_range_set_empty(&buf->valid_buffer_range);
- return TRUE;
+ return true;
}
static INLINE void
buf->domain = 0;
}
-static INLINE boolean
+static INLINE bool
nouveau_buffer_reallocate(struct nouveau_screen *screen,
struct nv04_resource *buf, unsigned domain)
{
*/
static uint8_t *
nouveau_transfer_staging(struct nouveau_context *nv,
- struct nouveau_transfer *tx, boolean permit_pb)
+ struct nouveau_transfer *tx, bool permit_pb)
{
const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK;
const unsigned size = align(tx->base.box.width, 4) + adj;
if (!nv->push_data)
- permit_pb = FALSE;
+ permit_pb = false;
if ((size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) && permit_pb) {
tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
* buffer. Also updates buf->data if present.
*
* Maybe just migrate to GART right away if we actually need to do this. */
-static boolean
+static bool
nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx)
{
struct nv04_resource *buf = nv04_resource(tx->base.resource);
buf->bo, buf->offset + base, buf->domain, size);
if (nouveau_bo_wait(tx->bo, NOUVEAU_BO_RD, nv->client))
- return FALSE;
+ return false;
if (buf->data)
memcpy(buf->data + base, tx->map, size);
- return TRUE;
+ return true;
}
static void
struct nv04_resource *buf = nv04_resource(tx->base.resource);
uint8_t *data = tx->map + offset;
const unsigned base = tx->base.box.x + offset;
- const boolean can_cb = !((base | size) & 3);
+ const bool can_cb = !((base | size) & 3);
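+   /* OR-ing base and size lets one mask test prove both are 4-byte
+    * aligned: (16 | 64) & 3 == 0, but (16 | 65) & 3 == 1, so the
+    * cb (constant-buffer) upload path only sees dword-aligned spans */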
if (buf->data)
memcpy(data, buf->data + base, size);
/* Does a CPU wait for the buffer's backing data to become reliably accessible
* for write/read by waiting on the buffer's relevant fences.
*/
-static INLINE boolean
+static INLINE bool
nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
{
if (rw == PIPE_TRANSFER_READ) {
if (!buf->fence_wr)
- return TRUE;
+ return true;
NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
!nouveau_fence_signalled(buf->fence_wr));
if (!nouveau_fence_wait(buf->fence_wr))
- return FALSE;
+ return false;
} else {
if (!buf->fence)
- return TRUE;
+ return true;
NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
!nouveau_fence_signalled(buf->fence));
if (!nouveau_fence_wait(buf->fence))
- return FALSE;
+ return false;
nouveau_fence_ref(NULL, &buf->fence);
}
nouveau_fence_ref(NULL, &buf->fence_wr);
- return TRUE;
+ return true;
}
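A minimal caller sketch (assumed, not part of this change): a CPU read only
has to wait for the last GPU write to land, which is why only fence_wr is
consulted for PIPE_TRANSFER_READ, while a write waits for all pending access.

   if (!nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE))
      return NULL; /* fence wait failed; abandon the mapping */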
-static INLINE boolean
+static INLINE bool
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
if (rw == PIPE_TRANSFER_READ)
}
/* Creates a cache in system memory of the buffer data. */
-static boolean
+static bool
nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
{
struct nouveau_transfer tx;
- boolean ret;
+ bool ret;
tx.base.resource = &buf->base;
tx.base.box.x = 0;
tx.base.box.width = buf->base.width0;
if (!buf->data)
if (!nouveau_buffer_malloc(buf))
- return FALSE;
+ return false;
if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY))
- return TRUE;
+ return true;
nv->stats.buf_cache_count++;
- if (!nouveau_transfer_staging(nv, &tx, FALSE))
- return FALSE;
+ if (!nouveau_transfer_staging(nv, &tx, false))
+ return false;
ret = nouveau_transfer_read(nv, &tx);
if (ret) {
* resource. This can be useful if we would otherwise have to wait for a read
* operation to complete on this data.
*/
-static INLINE boolean
+static INLINE bool
nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
{
if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
- return FALSE;
+ return false;
if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
- return FALSE;
+ return false;
if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
- return FALSE;
+ return false;
return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
}
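Sketch of how the map path consumes this predicate (assembled from the
surrounding hunks; nouveau_buffer_reallocate and
NOUVEAU_BUFFER_STATUS_REALLOC_MASK both appear in this diff):

   if (nouveau_buffer_should_discard(buf, usage)) {
      /* orphan the busy backing store instead of stalling on the GPU */
      buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
   }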
* back into VRAM on unmap. */
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
- nouveau_transfer_staging(nv, tx, TRUE);
+ nouveau_transfer_staging(nv, tx, true);
} else {
if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
/* The GPU is currently writing to this buffer. Copy its current
align_free(buf->data);
buf->data = NULL;
}
- nouveau_transfer_staging(nv, tx, FALSE);
+ nouveau_transfer_staging(nv, tx, false);
nouveau_transfer_read(nv, tx);
} else {
/* The buffer is currently idle. Create a staging area for writes,
* and make sure that the cached data is up-to-date. */
if (usage & PIPE_TRANSFER_WRITE)
- nouveau_transfer_staging(nv, tx, TRUE);
+ nouveau_transfer_staging(nv, tx, true);
if (!buf->data)
nouveau_buffer_cache(nv, buf);
}
if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
/* The whole range is being discarded, so it doesn't matter what was
* there before. No need to copy anything over. */
- nouveau_transfer_staging(nv, tx, TRUE);
+ nouveau_transfer_staging(nv, tx, true);
map = tx->map;
} else
if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
} else {
/* It is expected that the returned buffer be a representation of the
* data in question, so we must copy it over from the buffer. */
- nouveau_transfer_staging(nv, tx, TRUE);
+ nouveau_transfer_staging(nv, tx, true);
if (tx->map)
memcpy(tx->map, map, box->width);
map = tx->map;
const uint8_t bind = buf->base.bind;
/* make sure we invalidate dedicated caches */
if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
- nv->vbo_dirty = TRUE;
+ nv->vbo_dirty = true;
}
util_range_add(&buf->valid_buffer_range,
{
struct nouveau_screen *screen = nouveau_screen(pscreen);
struct nv04_resource *buffer;
- boolean ret;
+ bool ret;
buffer = CALLOC_STRUCT(nv04_resource);
if (!buffer)
}
ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);
- if (ret == FALSE)
+   if (!ret)
goto fail;
if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
return &buffer->base;
}
-static INLINE boolean
+static INLINE bool
nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf,
struct nouveau_bo *bo, unsigned offset, unsigned size)
{
if (!nouveau_buffer_malloc(buf))
- return FALSE;
+ return false;
if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client))
- return FALSE;
+ return false;
memcpy(buf->data, (uint8_t *)bo->map + offset, size);
- return TRUE;
+ return true;
}
/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
-boolean
+bool
nouveau_buffer_migrate(struct nouveau_context *nv,
struct nv04_resource *buf, const unsigned new_domain)
{
if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
if (!nouveau_buffer_allocate(screen, buf, new_domain))
- return FALSE;
+ return false;
ret = nouveau_bo_map(buf->bo, 0, nv->client);
if (ret)
-      return ret;
+      return false;
if (new_domain == NOUVEAU_BO_VRAM) {
/* keep a system memory copy of our data in case we hit a fallback */
if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
- return FALSE;
+ return false;
if (nouveau_mesa_debug)
debug_printf("migrating %u KiB to VRAM\n", size / 1024);
}
if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
struct nouveau_transfer tx;
if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
- return FALSE;
+ return false;
tx.base.resource = &buf->base;
tx.base.box.x = 0;
tx.base.box.width = buf->base.width0;
tx.bo = NULL;
tx.map = NULL;
- if (!nouveau_transfer_staging(nv, &tx, FALSE))
- return FALSE;
+ if (!nouveau_transfer_staging(nv, &tx, false))
+ return false;
nouveau_transfer_write(nv, &tx, 0, tx.base.box.width);
nouveau_buffer_transfer_del(nv, &tx);
} else
- return FALSE;
+ return false;
assert(buf->domain == new_domain);
- return TRUE;
+ return true;
}
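Call sites later in this diff use this to pull user arrays into GPU-visible
memory, e.g. the nv30 draw path:

   nouveau_buffer_migrate(&nv30->base, buf, NOUVEAU_BO_GART);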
/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
* We'd like to only allocate @size bytes here, but then we'd have to rebase
* the vertex indices ...
*/
-boolean
+bool
nouveau_user_buffer_upload(struct nouveau_context *nv,
struct nv04_resource *buf,
unsigned base, unsigned size)
buf->base.width0 = base + size;
if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
- return FALSE;
+ return false;
ret = nouveau_bo_map(buf->bo, 0, nv->client);
if (ret)
- return FALSE;
+ return false;
memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size);
- return TRUE;
+ return true;
}
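Assumed caller sketch (min_off/used_bytes are hypothetical names): the vertex
validation paths upload the range of the user array a draw references, and
fall back to pushing vertices inline if the upload fails:

   if (!nouveau_user_buffer_upload(&nv30->base, buf, min_off, used_bytes))
      return; /* push vertex data through the FIFO instead */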
/* Allocate an extra bo if we can't fit everything we need simultaneously.
* (Could happen for very large user arrays.)
*/
-static INLINE boolean
+static INLINE bool
nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
{
int ret;
/* Continue to next scratch buffer, if available (no wrapping, large enough).
* Allocate it if it has not yet been created.
*/
-static INLINE boolean
+static INLINE bool
nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
{
struct nouveau_bo *bo;
const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;
if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap))
- return FALSE;
+ return false;
nv->scratch.id = i;
bo = nv->scratch.bo[i];
if (!bo) {
ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
if (ret)
- return FALSE;
+ return false;
nv->scratch.bo[i] = bo;
}
nv->scratch.current = bo;
return !ret;
}
-static boolean
+static bool
nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size)
{
- boolean ret;
+ bool ret;
ret = nouveau_scratch_next(nv, min_size);
if (!ret)
struct nv04_resource *dst, unsigned dst_pos,
struct nv04_resource *src, unsigned src_pos, unsigned size);
-boolean
+bool
nouveau_buffer_migrate(struct nouveau_context *,
struct nv04_resource *, unsigned domain);
}
/* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
-static INLINE boolean
+static INLINE bool
nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
{
return nv04_resource(resource)->domain != 0;
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
unsigned bytes, unsigned usage);
-boolean
+bool
nouveau_user_buffer_upload(struct nouveau_context *, struct nv04_resource *,
unsigned base, unsigned size);
struct nouveau_client *client;
struct nouveau_pushbuf *pushbuf;
- boolean vbo_dirty;
+ bool vbo_dirty;
void (*copy_data)(struct nouveau_context *,
struct nouveau_bo *dst, unsigned, unsigned,
nv->stats.buf_cache_count = 0;
nv->stats.buf_cache_frame |= 1;
if ((nv->stats.buf_cache_frame & 0xf) == 0xf)
- nv->screen->hint_buf_keep_sysmem_copy = TRUE;
+ nv->screen->hint_buf_keep_sysmem_copy = true;
}
}
#include <sched.h>
#endif
-boolean
+bool
nouveau_fence_new(struct nouveau_screen *screen, struct nouveau_fence **fence,
- boolean emit)
+ bool emit)
{
*fence = CALLOC_STRUCT(nouveau_fence);
if (!*fence)
- return FALSE;
+ return false;
(*fence)->screen = screen;
(*fence)->ref = 1;
if (emit)
nouveau_fence_emit(*fence);
- return TRUE;
+ return true;
}
static void
}
}
-boolean
+bool
nouveau_fence_work(struct nouveau_fence *fence,
void (*func)(void *), void *data)
{
if (!fence || fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
func(data);
- return TRUE;
+ return true;
}
work = CALLOC_STRUCT(nouveau_fence_work);
if (!work)
- return FALSE;
+ return false;
work->func = func;
work->data = data;
LIST_ADD(&work->list, &fence->work);
- return TRUE;
+ return true;
}
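Sketch of the intended usage (release_cb and old_data are hypothetical): the
callback is queued on the fence's work list, or runs on the spot when the
fence has already signalled; false only means the work record could not be
allocated.

   nouveau_fence_work(screen->fence.current, release_cb, old_data);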
void
}
void
-nouveau_fence_update(struct nouveau_screen *screen, boolean flushed)
+nouveau_fence_update(struct nouveau_screen *screen, bool flushed)
{
struct nouveau_fence *fence;
struct nouveau_fence *next = NULL;
#define NOUVEAU_FENCE_MAX_SPINS (1 << 31)
-boolean
+bool
nouveau_fence_signalled(struct nouveau_fence *fence)
{
struct nouveau_screen *screen = fence->screen;
if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
- return TRUE;
+ return true;
if (fence->state >= NOUVEAU_FENCE_STATE_EMITTED)
- nouveau_fence_update(screen, FALSE);
+ nouveau_fence_update(screen, false);
return fence->state == NOUVEAU_FENCE_STATE_SIGNALLED;
}
-boolean
+bool
nouveau_fence_wait(struct nouveau_fence *fence)
{
struct nouveau_screen *screen = fence->screen;
if (fence->state < NOUVEAU_FENCE_STATE_FLUSHED)
if (nouveau_pushbuf_kick(screen->pushbuf, screen->pushbuf->channel))
- return FALSE;
+ return false;
if (fence == screen->fence.current)
nouveau_fence_next(screen);
do {
- nouveau_fence_update(screen, FALSE);
+ nouveau_fence_update(screen, false);
if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
- return TRUE;
+ return true;
if (!spins)
NOUVEAU_DRV_STAT(screen, any_non_kernel_fence_sync_count, 1);
spins++;
fence->sequence,
screen->fence.sequence_ack, screen->fence.sequence);
- return FALSE;
+ return false;
}
void
nouveau_fence_ref(NULL, &screen->fence.current);
- nouveau_fence_new(screen, &screen->fence.current, FALSE);
+ nouveau_fence_new(screen, &screen->fence.current, false);
}
void nouveau_fence_emit(struct nouveau_fence *);
void nouveau_fence_del(struct nouveau_fence *);
-boolean nouveau_fence_new(struct nouveau_screen *, struct nouveau_fence **,
- boolean emit);
-boolean nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
-void nouveau_fence_update(struct nouveau_screen *, boolean flushed);
-void nouveau_fence_next(struct nouveau_screen *);
-boolean nouveau_fence_wait(struct nouveau_fence *);
-boolean nouveau_fence_signalled(struct nouveau_fence *);
+bool nouveau_fence_new(struct nouveau_screen *, struct nouveau_fence **,
+ bool emit);
+bool nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
+void nouveau_fence_update(struct nouveau_screen *, bool flushed);
+void nouveau_fence_next(struct nouveau_screen *);
+bool nouveau_fence_wait(struct nouveau_fence *);
+bool nouveau_fence_signalled(struct nouveau_fence *);
static INLINE void
nouveau_fence_ref(struct nouveau_fence *fence, struct nouveau_fence **ref)
}
-boolean
+bool
nouveau_screen_bo_get_handle(struct pipe_screen *pscreen,
struct nouveau_bo *bo,
unsigned stride,
return nouveau_bo_name_get(bo, &whandle->handle) == 0;
} else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
whandle->handle = bo->handle;
- return TRUE;
+ return true;
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
return nouveau_bo_set_prime(bo, (int *)&whandle->handle) == 0;
} else {
- return FALSE;
+ return false;
}
}
int64_t cpu_gpu_time_delta;
- boolean hint_buf_keep_sysmem_copy;
+ bool hint_buf_keep_sysmem_copy;
unsigned vram_domain;
return (struct nouveau_screen *)pscreen;
}
-boolean nouveau_drm_screen_unref(struct nouveau_screen *screen);
+bool nouveau_drm_screen_unref(struct nouveau_screen *screen);
-boolean
+bool
nouveau_screen_bo_get_handle(struct pipe_screen *pscreen,
struct nouveau_bo *bo,
unsigned stride,
case PIPE_MPEG12_MO_TYPE_DUAL_PRIME: {
base = NV17_MPEG_CMD_CHROMA_MV_HEADER_COUNT_2;
if (forward) {
- nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE, FALSE,
- x, y, mb->PMV[0][0], dec->past, TRUE);
- nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE, TRUE,
- x, y2, mb->PMV[0][0], dec->past, FALSE);
+ nouveau_vpe_mb_mv(dec, base, luma, frame, true, false,
+ x, y, mb->PMV[0][0], dec->past, true);
+ nouveau_vpe_mb_mv(dec, base, luma, frame, true, true,
+ x, y2, mb->PMV[0][0], dec->past, false);
}
if (backward && forward) {
- nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, TRUE,
- x, y, mb->PMV[1][0], dec->future, TRUE);
- nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, FALSE,
- x, y2, mb->PMV[1][1], dec->future, FALSE);
+ nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, true,
+ x, y, mb->PMV[1][0], dec->future, true);
+ nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, false,
+ x, y2, mb->PMV[1][1], dec->future, false);
} else assert(!backward);
break;
}
if (frame)
base |= NV17_MPEG_CMD_CHROMA_MV_HEADER_TYPE_FRAME;
if (forward)
- nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE,
+ nouveau_vpe_mb_mv(dec, base, luma, frame, true,
dec->picture_structure != PIPE_MPEG12_PICTURE_STRUCTURE_FIELD_TOP,
- x, y, mb->PMV[0][0], dec->past, TRUE);
+ x, y, mb->PMV[0][0], dec->past, true);
if (backward && forward)
- nouveau_vpe_mb_mv(dec, base, luma, frame, FALSE,
+ nouveau_vpe_mb_mv(dec, base, luma, frame, false,
dec->picture_structure == PIPE_MPEG12_PICTURE_STRUCTURE_FIELD_TOP,
- x, y, mb->PMV[0][1], dec->future, TRUE);
+ x, y, mb->PMV[0][1], dec->future, true);
else assert(!backward);
break;
}
base |= NV17_MPEG_CMD_CHROMA_MV_HEADER_TYPE_FRAME;
/* frame 16x16 */
if (forward)
- nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE, FALSE,
- x, y, mb->PMV[0][0], dec->past, TRUE);
+ nouveau_vpe_mb_mv(dec, base, luma, frame, true, false,
+ x, y, mb->PMV[0][0], dec->past, true);
if (backward)
- nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, FALSE,
- x, y, mb->PMV[0][1], dec->future, TRUE);
+ nouveau_vpe_mb_mv(dec, base, luma, frame, !forward, false,
+ x, y, mb->PMV[0][1], dec->future, true);
return;
mv2:
if (!frame)
base |= NV17_MPEG_CMD_CHROMA_MV_HEADER_MV_SPLIT_HALF_MB;
if (forward) {
- nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE,
+ nouveau_vpe_mb_mv(dec, base, luma, frame, true,
mb->motion_vertical_field_select & PIPE_MPEG12_FS_FIRST_FORWARD,
- x, y, mb->PMV[0][0], dec->past, TRUE);
- nouveau_vpe_mb_mv(dec, base, luma, frame, TRUE,
+ x, y, mb->PMV[0][0], dec->past, true);
+ nouveau_vpe_mb_mv(dec, base, luma, frame, true,
mb->motion_vertical_field_select & PIPE_MPEG12_FS_SECOND_FORWARD,
- x, y2, mb->PMV[1][0], dec->past, FALSE);
+ x, y2, mb->PMV[1][0], dec->past, false);
}
if (backward) {
nouveau_vpe_mb_mv(dec, base, luma, frame, !forward,
mb->motion_vertical_field_select & PIPE_MPEG12_FS_FIRST_BACKWARD,
- x, y, mb->PMV[0][1], dec->future, TRUE);
+ x, y, mb->PMV[0][1], dec->future, true);
nouveau_vpe_mb_mv(dec, base, luma, frame, !forward,
mb->motion_vertical_field_select & PIPE_MPEG12_FS_SECOND_BACKWARD,
- x, y2, mb->PMV[1][1], dec->future, FALSE);
+ x, y2, mb->PMV[1][1], dec->future, false);
}
}
mb = (const struct pipe_mpeg12_macroblock *)pipe_mb;
for (i = 0; i < num_macroblocks; ++i, mb++) {
if (mb->macroblock_type & PIPE_MPEG12_MB_TYPE_INTRA) {
- nouveau_vpe_mb_dct_header(dec, mb, TRUE);
- nouveau_vpe_mb_dct_header(dec, mb, FALSE);
+ nouveau_vpe_mb_dct_header(dec, mb, true);
+ nouveau_vpe_mb_dct_header(dec, mb, false);
} else {
- nouveau_vpe_mb_mv_header(dec, mb, TRUE);
- nouveau_vpe_mb_dct_header(dec, mb, TRUE);
+ nouveau_vpe_mb_mv_header(dec, mb, true);
+ nouveau_vpe_mb_dct_header(dec, mb, true);
- nouveau_vpe_mb_mv_header(dec, mb, FALSE);
- nouveau_vpe_mb_dct_header(dec, mb, FALSE);
+ nouveau_vpe_mb_mv_header(dec, mb, false);
+ nouveau_vpe_mb_dct_header(dec, mb, false);
}
if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
nouveau_vpe_mb_dct_blocks(dec, mb);
return push->end - push->cur;
}
-static INLINE boolean
+static INLINE bool
PUSH_SPACE(struct nouveau_pushbuf *push, uint32_t size)
{
if (PUSH_AVAIL(push) < size)
return nouveau_pushbuf_space(push, size, 0, 0) == 0;
- return TRUE;
+ return true;
}
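Typical call pattern at the emit sites in this diff (BEGIN_NV04/PUSH_DATA are
the existing emission macros; this method appears further down):

   if (!PUSH_SPACE(push, 2))
      return; /* could not reserve pushbuf space */
   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);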
static INLINE void
struct pipe_framebuffer_state *fb = &nv30->framebuffer;
uint32_t colr = 0, zeta = 0, mode = 0;
- if (!nv30_state_validate(nv30, NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR, TRUE))
+ if (!nv30_state_validate(nv30, NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR, true))
return;
if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) {
screen = &nv30->screen->base;
nouveau_fence_next(screen);
- nouveau_fence_update(screen, TRUE);
+ nouveau_fence_update(screen, true);
if (push->bufctx) {
struct nouveau_bufref *bref;
nv30->config.aniso = NV40_3D_TEX_WRAP_ANISO_MIP_FILTER_OPTIMIZATION_OFF;
- if (debug_get_bool_option("NV30_SWTNL", FALSE))
+ if (debug_get_bool_option("NV30_SWTNL", false))
nv30->draw_flags |= NV30_NEW_SWTNL;
nv30->sample_mask = 0xffff;
unsigned scissor_off;
unsigned num_vtxelts;
int index_bias;
- boolean prim_restart;
+ bool prim_restart;
struct nv30_fragprog *fragprog;
} state;
uint32_t vbo_user;
unsigned vbo_min_index;
unsigned vbo_max_index;
- boolean vbo_push_hint;
+ bool vbo_push_hint;
struct nouveau_heap *blit_vp;
struct pipe_resource *blit_fp;
struct pipe_query *render_cond_query;
unsigned render_cond_mode;
- boolean render_cond_cond;
+ bool render_cond_cond;
};
static INLINE struct nv30_context *
void
nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info);
-boolean
-nv30_state_validate(struct nv30_context *nv30, uint32_t mask, boolean hwtnl);
+bool
+nv30_state_validate(struct nv30_context *nv30, uint32_t mask, bool hwtnl);
void
nv30_state_release(struct nv30_context *nv30);
PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_STREAM,
render->max_vertex_buffer_bytes);
if (!r->buffer)
- return FALSE;
+ return false;
r->offset = 0;
}
- return TRUE;
+ return true;
}
static void *
NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, NV30_3D_VTXBUF_DMA1);
}
- if (!nv30_state_validate(nv30, ~0, FALSE))
+ if (!nv30_state_validate(nv30, ~0, false))
return;
BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, NV30_3D_VTXBUF_DMA1);
}
- if (!nv30_state_validate(nv30, ~0, FALSE))
+ if (!nv30_state_validate(nv30, ~0, false))
return;
BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
[TGSI_SEMANTIC_TEXCOORD] = { EMIT_4F, INTERP_PERSPECTIVE, 8, 7, 0x00004000 },
};
-static boolean
+static bool
vroute_add(struct nv30_render *r, uint attrib, uint sem, uint *idx)
{
struct nv30_screen *screen = r->nv30->screen;
}
if (emit == EMIT_OMIT)
- return FALSE;
+ return false;
draw_emit_vertex_attr(vinfo, emit, vroute[sem].interp, attrib);
format = draw_translate_vinfo_format(emit);
assert(sem == TGSI_SEMANTIC_TEXCOORD);
*idx = 0x00001000 << (result - 8);
}
- return TRUE;
+ return true;
}
-static boolean
+static bool
nv30_render_validate(struct nv30_context *nv30)
{
struct nv30_render *r = nv30_render(nv30->draw->render);
}
if (nouveau_heap_alloc(heap, 16, &r->vertprog, &r->vertprog))
- return FALSE;
+ return false;
}
}
}
vinfo->size /= 4;
- return TRUE;
+ return true;
}
void
draw_set_rasterize_stage(draw, stage);
draw_wide_line_threshold(draw, 10000000.f);
draw_wide_point_threshold(draw, 10000000.f);
- draw_wide_point_sprites(draw, TRUE);
+ draw_wide_point_sprites(draw, true);
nv30->draw = draw;
}
struct nouveau_pushbuf *push = nv30->base.pushbuf;
struct nouveau_object *eng3d = nv30->screen->eng3d;
struct nv30_fragprog *fp = nv30->fragprog.program;
- boolean upload = FALSE;
+ bool upload = false;
int i;
if (!fp->translated) {
if (!fp->translated)
return;
- upload = TRUE;
+ upload = true;
}
/* update constants, also needs to be done on every fp switch as we
if (!memcmp(&fp->insn[off], &cbuf[idx], 4 * 4))
continue;
memcpy(&fp->insn[off], &cbuf[idx], 4 * 4);
- upload = TRUE;
+ upload = true;
}
}
unsigned stride;
if (!mt || !mt->base.bo)
- return FALSE;
+ return false;
stride = mt->level[0].pitch;
}
if (!mt->uniform_pitch)
- mt->swizzled = TRUE;
+ mt->swizzled = true;
size = 0;
for (l = 0; l <= pt->last_level; l++) {
struct translate *translate;
- boolean primitive_restart;
+ bool primitive_restart;
uint32_t prim;
uint32_t restart_index;
};
{
struct push_context ctx;
unsigned i, index_size;
- boolean apply_bias = info->indexed && info->index_bias;
+ bool apply_bias = info->indexed && info->index_bias;
ctx.push = nv30->base.pushbuf;
ctx.translate = nv30->vertex->translate;
} else {
ctx.idxbuf = NULL;
index_size = 0;
- ctx.primitive_restart = FALSE;
+ ctx.primitive_restart = false;
ctx.restart_index = 0;
}
if (ntfy1) {
while (ntfy1[3] & 0xff000000) {
if (!wait)
- return FALSE;
+ return false;
}
switch (q->type) {
}
*res64 = q->result;
- return TRUE;
+ return true;
}
static void
if (!nv30->vtxbuf[i].buffer)
continue;
if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nv30->base.vbo_dirty = TRUE;
+ nv30->base.vbo_dirty = true;
}
if (nv30->idxbuf.buffer &&
nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nv30->base.vbo_dirty = TRUE;
+ nv30->base.vbo_dirty = true;
}
}
struct nv30_miptree_level level[13];
uint32_t uniform_pitch;
uint32_t layer_size;
- boolean swizzled;
+ bool swizzled;
unsigned ms_mode;
unsigned ms_x:1;
unsigned ms_y:1;
unsigned bindings)
{
if (sample_count > 4)
- return FALSE;
+ return false;
if (!(0x00000017 & (1 << sample_count)))
- return FALSE;
+ return false;
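+   /* 0x17 = 0b10111: bits 0, 1, 2 and 4 are the supported sample counts,
+    * so sample_count == 3 fails the mask test while 0/1/2/4 pass */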
if (!util_format_is_supported(format, bindings)) {
- return FALSE;
+ return false;
}
/* transfers & shared are always supported */
nouveau_pushbuf_kick(push, push->channel);
- nouveau_fence_new(&screen->base, &screen->base.fence.current, FALSE);
+ nouveau_fence_new(&screen->base, &screen->base.fence.current, false);
return pscreen;
}
struct tgsi_shader_info info;
struct draw_vertex_shader *draw;
- boolean translated;
+ bool translated;
unsigned enabled_ucps;
uint16_t texcoord[10];
struct tgsi_shader_info info;
struct draw_fragment_shader *draw;
- boolean translated;
+ bool translated;
uint32_t *insn;
unsigned insn_len;
nv30->base.pushbuf->user_priv = &nv30->bufctx;
}
-boolean
-nv30_state_validate(struct nv30_context *nv30, uint32_t mask, boolean hwtnl)
+bool
+nv30_state_validate(struct nv30_context *nv30, uint32_t mask, bool hwtnl)
{
struct nouveau_screen *screen = &nv30->screen->base;
struct nouveau_pushbuf *push = nv30->base.pushbuf;
nouveau_pushbuf_bufctx(push, bctx);
if (nouveau_pushbuf_validate(push)) {
nouveau_pushbuf_bufctx(push, NULL);
- return FALSE;
+ return false;
}
/*XXX*/
}
}
- return TRUE;
+ return true;
}
void
* of different ways.
*/
-static INLINE boolean
+static INLINE bool
nv30_transfer_scaled(struct nv30_rect *src, struct nv30_rect *dst)
{
if (src->x1 - src->x0 != dst->x1 - dst->x0)
- return TRUE;
+ return true;
if (src->y1 - src->y0 != dst->y1 - dst->y0)
- return TRUE;
- return FALSE;
+ return true;
+ return false;
}
-static INLINE boolean
+static INLINE bool
nv30_transfer_blit(XFER_ARGS)
{
if (nv30->screen->eng3d->oclass < NV40_3D_CLASS)
- return FALSE;
+ return false;
if (dst->offset & 63 || dst->pitch & 63 || dst->d > 1)
- return FALSE;
+ return false;
if (dst->w < 2 || dst->h < 2)
- return FALSE;
+ return false;
if (dst->cpp > 4 || (dst->cpp == 1 && !dst->pitch))
- return FALSE;
+ return false;
if (src->cpp > 4)
- return FALSE;
- return TRUE;
+ return false;
+ return true;
}
static INLINE struct nouveau_heap *
PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
}
-static boolean
+static bool
nv30_transfer_sifm(XFER_ARGS)
{
if (!src->pitch || (src->w | src->h) > 1024 || src->w < 2 || src->h < 2)
- return FALSE;
+ return false;
if (src->d > 1 || dst->d > 1)
- return FALSE;
+ return false;
if (dst->offset & 63)
- return FALSE;
+ return false;
if (!dst->pitch) {
if ((dst->w | dst->h) > 2048 || dst->w < 2 || dst->h < 2)
- return FALSE;
+ return false;
} else {
if (dst->domain != NOUVEAU_BO_VRAM)
- return FALSE;
+ return false;
if (dst->pitch & 63)
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
static void
* that name is still accurate on nv4x) error.
*/
-static boolean
+static bool
nv30_transfer_m2mf(XFER_ARGS)
{
if (!src->pitch || !dst->pitch)
- return FALSE;
+ return false;
if (nv30_transfer_scaled(src, dst))
- return FALSE;
- return TRUE;
+ return false;
+ return true;
}
static void
}
}
-static boolean
+static bool
nv30_transfer_cpu(XFER_ARGS)
{
if (nv30_transfer_scaled(src, dst))
- return FALSE;
- return TRUE;
+ return false;
+ return true;
}
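The table below pairs each predicate with its execute callback; a plausible
shape for the selection loop (assumed, since only the first row is visible
here, and XFER_ARGS is taken to expand to the (nv30, src, dst) triple these
helpers use) tries the methods in order:

   for (method = methods; method->name; ++method) {
      if (method->possible(nv30, src, dst)) {
         method->execute(nv30, src, dst);
         break;
      }
   }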
static char *
{
static const struct {
char *name;
- boolean (*possible)(XFER_ARGS);
+ bool (*possible)(XFER_ARGS);
void (*execute)(XFER_ARGS);
} *method, methods[] = {
{ "m2mf", nv30_transfer_m2mf, nv30_transfer_rect_m2mf },
} else {
nouveau_buffer_migrate(&nv30->base, buf, NOUVEAU_BO_GART);
}
- nv30->base.vbo_dirty = TRUE;
+ nv30->base.vbo_dirty = true;
}
}
}
NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
0, NV30_3D_VTXBUF_DMA1);
}
- nv30->base.vbo_dirty = TRUE;
+ nv30->base.vbo_dirty = true;
}
static INLINE void
for (i = 0; i < vertex->num_elements; i++) {
struct nv04_resource *res;
unsigned offset;
- boolean user;
+ bool user;
ve = &vertex->pipe[i];
vb = &nv30->vtxbuf[ve->vertex_buffer_index];
return NULL;
memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
so->num_elements = num_elements;
- so->need_conversion = FALSE;
+ so->need_conversion = false;
transkey.nr_elements = 0;
transkey.output_stride = 0;
return NULL;
}
so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw;
- so->need_conversion = TRUE;
+ so->need_conversion = true;
}
if (1) {
}
static void
-nv30_draw_elements(struct nv30_context *nv30, boolean shorten,
+nv30_draw_elements(struct nv30_context *nv30, bool shorten,
unsigned mode, unsigned start, unsigned count,
unsigned instance_count, int32_t index_bias)
{
if (nv30->vbo_user && !(nv30->dirty & (NV30_NEW_VERTEX | NV30_NEW_ARRAYS)))
nv30_update_user_vbufs(nv30);
- nv30_state_validate(nv30, ~0, TRUE);
+ nv30_state_validate(nv30, ~0, true);
if (nv30->draw_flags) {
nv30_render_vbo(pipe, info);
return;
if (!nv30->vtxbuf[i].buffer)
continue;
if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
- nv30->base.vbo_dirty = TRUE;
+ nv30->base.vbo_dirty = true;
}
if (!nv30->base.vbo_dirty && nv30->idxbuf.buffer &&
nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
- nv30->base.vbo_dirty = TRUE;
+ nv30->base.vbo_dirty = true;
if (nv30->base.vbo_dirty) {
BEGIN_NV04(push, NV30_3D(VTX_CACHE_INVALIDATE_1710), 1);
PUSH_DATA (push, 0);
- nv30->base.vbo_dirty = FALSE;
+ nv30->base.vbo_dirty = false;
}
if (!info->indexed) {
info->mode, info->start, info->count,
info->instance_count);
} else {
- boolean shorten = info->max_index <= 65535;
+ bool shorten = info->max_index <= 65535;
if (info->primitive_restart != nv30->state.prim_restart) {
if (info->primitive_restart) {
PUSH_DATA (push, info->restart_index);
if (info->restart_index > 65535)
- shorten = FALSE;
+ shorten = false;
} else {
BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 1);
PUSH_DATA (push, 0);
PUSH_DATA (push, info->restart_index);
if (info->restart_index > 65535)
- shorten = FALSE;
+ shorten = false;
}
nv30_draw_elements(nv30, shorten,
vp->consts = NULL;
vp->nr_consts = 0;
- vp->translated = FALSE;
+ vp->translated = false;
}
void
struct nouveau_object *eng3d = nv30->screen->eng3d;
struct nv30_vertprog *vp = nv30->vertprog.program;
struct nv30_fragprog *fp = nv30->fragprog.program;
- boolean upload_code = FALSE;
- boolean upload_data = FALSE;
+ bool upload_code = false;
+ bool upload_data = false;
unsigned i;
if (nv30->dirty & NV30_NEW_FRAGPROG) {
}
}
- upload_code = TRUE;
+ upload_code = true;
}
if (vp->nr_consts && !vp->data) {
}
}
- upload_code = TRUE;
- upload_data = TRUE;
+ upload_code = true;
+ upload_data = true;
}
if (vp->nr_consts) {
return mask;
}
-static boolean
+static bool
nvfx_fragprog_parse_instruction(struct nvfx_fpc *fpc,
const struct tgsi_full_instruction *finst)
{
int i;
if (finst->Instruction.Opcode == TGSI_OPCODE_END)
- return TRUE;
+ return true;
for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
const struct tgsi_full_src_register *fsrc;
break;
default:
NOUVEAU_ERR("bad src file\n");
- return FALSE;
+ return false;
}
}
default:
NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
- return FALSE;
+ return false;
}
out:
release_temps(fpc);
- return TRUE;
+ return true;
nv3x_cflow:
{
static int warned = 0;
goto out;
}
-static boolean
+static bool
nvfx_fragprog_parse_decl_input(struct nvfx_fpc *fpc,
const struct tgsi_full_declaration *fdec)
{
case TGSI_SEMANTIC_GENERIC:
case TGSI_SEMANTIC_PCOORD:
/* will be assigned to remaining TC slots later */
- return TRUE;
+ return true;
default:
assert(0);
- return FALSE;
+ return false;
}
fpc->r_input[idx] = nvfx_reg(NVFXSR_INPUT, hw);
- return TRUE;
+ return true;
}
-static boolean
+static bool
nvfx_fragprog_assign_generic(struct nvfx_fpc *fpc,
const struct tgsi_full_declaration *fdec)
{
}
hw = NVFX_FP_OP_INPUT_SRC_TC(hw);
fpc->r_input[idx] = nvfx_reg(NVFXSR_INPUT, hw);
- return TRUE;
+ return true;
}
}
- return FALSE;
+ return false;
default:
- return TRUE;
+ return true;
}
}
-static boolean
+static bool
nvfx_fragprog_parse_decl_output(struct nvfx_fpc *fpc,
const struct tgsi_full_declaration *fdec)
{
}
if(hw > ((fpc->is_nv4x) ? 4 : 2)) {
NOUVEAU_ERR("bad rcol index\n");
- return FALSE;
+ return false;
}
break;
default:
NOUVEAU_ERR("bad output semantic\n");
- return FALSE;
+ return false;
}
fpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
fpc->r_temps |= (1ULL << hw);
- return TRUE;
+ return true;
}
-static boolean
+static bool
nvfx_fragprog_prepare(struct nvfx_fpc *fpc)
{
struct tgsi_parse_context p;
fpc->r_temps_discard = 0ULL;
}
- return TRUE;
+ return true;
out_err:
FREE(fpc->r_temp);
fpc->r_temp = NULL;
tgsi_parse_free(&p);
- return FALSE;
+ return false;
}
-DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_fp, "NVFX_DUMP_FP", FALSE)
+DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_fp, "NVFX_DUMP_FP", false)
void
_nvfx_fragprog_translate(uint16_t oclass, struct nv30_fragprog *fp)
struct nvfx_fpc *fpc = NULL;
struct util_dynarray insns;
- fp->translated = FALSE;
+ fp->translated = false;
fp->point_sprite_control = 0;
fp->vp_or = 0;
debug_printf("\n");
}
- fp->translated = TRUE;
+ fp->translated = true;
out:
tgsi_parse_free(&parse);
};
static INLINE struct nvfx_insn
-nvfx_insn(boolean sat, unsigned op, int unit, struct nvfx_reg dst, unsigned mask, struct nvfx_src s0, struct nvfx_src s1, struct nvfx_src s2)
+nvfx_insn(bool sat, unsigned op, int unit, struct nvfx_reg dst, unsigned mask, struct nvfx_src s0, struct nvfx_src s1, struct nvfx_src s2)
{
struct nvfx_insn insn = {
.op = op,
void
_nvfx_fragprog_translate(uint16_t oclass, struct nv30_fragprog *fp);
-boolean
+bool
_nvfx_vertprog_translate(uint16_t oclass, struct nv30_vertprog *vp);
#endif
return mask;
}
-static boolean
+static bool
nvfx_vertprog_parse_instruction(struct nvfx_vpc *vpc,
unsigned idx, const struct tgsi_full_instruction *finst)
{
struct nvfx_insn insn;
struct nvfx_relocation reloc;
struct nvfx_loop_entry loop;
- boolean sat = FALSE;
+ bool sat = false;
int mask;
int ai = -1, ci = -1, ii = -1;
int i;
break;
default:
NOUVEAU_ERR("bad src file\n");
- return FALSE;
+ return false;
}
}
for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
if(src[i].reg.type < 0)
- return FALSE;
+ return false;
}
if(finst->Dst[0].Register.File == TGSI_FILE_ADDRESS &&
finst->Instruction.Opcode != TGSI_OPCODE_ARL)
- return FALSE;
+ return false;
final_dst = dst = tgsi_dst(vpc, &finst->Dst[0]);
mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
if(finst->Instruction.Saturate) {
assert(finst->Instruction.Opcode != TGSI_OPCODE_ARL);
if (vpc->is_nv4x)
- sat = TRUE;
+ sat = true;
else
if(dst.type != NVFXSR_TEMP)
dst = temp(vpc);
break;
default:
NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
- return FALSE;
+ return false;
}
if(finst->Instruction.Saturate && !vpc->is_nv4x) {
}
release_temps(vpc);
- return TRUE;
+ return true;
}
-static boolean
+static bool
nvfx_vertprog_parse_decl_output(struct nvfx_vpc *vpc,
const struct tgsi_full_declaration *fdec)
{
vpc->r_result[idx] = temp(vpc);
vpc->r_temps_discard = 0;
vpc->cvtx_idx = idx;
- return TRUE;
+ return true;
case TGSI_SEMANTIC_COLOR:
if (fdec->Semantic.Index == 0) {
hw = NVFX_VP(INST_DEST_COL0);
hw = NVFX_VP(INST_DEST_COL1);
} else {
NOUVEAU_ERR("bad colour semantic index\n");
- return FALSE;
+ return false;
}
break;
case TGSI_SEMANTIC_BCOLOR:
hw = NVFX_VP(INST_DEST_BFC1);
} else {
NOUVEAU_ERR("bad bcolour semantic index\n");
- return FALSE;
+ return false;
}
break;
case TGSI_SEMANTIC_FOG:
if (i == num_texcoords) {
vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0);
- return TRUE;
+ return true;
}
break;
case TGSI_SEMANTIC_EDGEFLAG:
vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0);
- return TRUE;
+ return true;
default:
NOUVEAU_ERR("bad output semantic\n");
- return FALSE;
+ return false;
}
vpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
- return TRUE;
+ return true;
}
-static boolean
+static bool
nvfx_vertprog_prepare(struct nvfx_vpc *vpc)
{
struct tgsi_parse_context p;
break;
case TGSI_FILE_OUTPUT:
if (!nvfx_vertprog_parse_decl_output(vpc, fdec))
- return FALSE;
+ return false;
break;
default:
break;
}
vpc->r_temps_discard = 0;
- return TRUE;
+ return true;
}
-DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", FALSE)
+DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", false)
-boolean
+bool
_nvfx_vertprog_translate(uint16_t oclass, struct nv30_vertprog *vp)
{
struct tgsi_parse_context parse;
struct util_dynarray insns;
int i, ucps;
- vp->translated = FALSE;
+ vp->translated = false;
vp->nr_insns = 0;
vp->nr_consts = 0;
vpc = CALLOC_STRUCT(nvfx_vpc);
if (!vpc)
- return FALSE;
+ return false;
vpc->is_nv4x = (oclass >= NV40_3D_CLASS) ? ~0 : 0;
vpc->vp = vp;
vpc->pipe = vp->pipe;
if (!nvfx_vertprog_prepare(vpc)) {
FREE(vpc);
- return FALSE;
+ return false;
}
/* Redirect post-transform vertex position to a temp if user clip
debug_printf("\n");
}
- vp->translated = TRUE;
+ vp->translated = true;
out:
tgsi_parse_free(&parse);
# define nv50_format_table nvc0_format_table
#endif
-/* return TRUE for formats that can be converted among each other by NVC0_2D */
-static INLINE boolean
+/* return true for formats that can be converted among each other by NVC0_2D */
+static INLINE bool
nv50_2d_dst_format_faithful(enum pipe_format format)
{
const uint64_t mask =
uint8_t id = nv50_format_table[format].rt;
return (id >= 0xc0) && (mask & (1ULL << (id - 0xc0)));
}
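The helpers below reuse the same membership idiom: every rendertarget format
ID of interest sits in 0xc0..0xff, so a 64-bit mask indexed by (id - 0xc0)
covers the whole range (id == 0xc0 tests bit 0, id == 0xff tests bit 63).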
-static INLINE boolean
+static INLINE bool
nv50_2d_src_format_faithful(enum pipe_format format)
{
const uint64_t mask =
return (id >= 0xc0) && (mask & (1ULL << (id - 0xc0)));
}
-static INLINE boolean
+static INLINE bool
nv50_2d_format_supported(enum pipe_format format)
{
uint8_t id = nv50_format_table[format].rt;
(NV50_ENG2D_SUPPORTED_FORMATS & (1ULL << (id - 0xc0)));
}
-static INLINE boolean
+static INLINE bool
nv50_2d_dst_format_ops_supported(enum pipe_format format)
{
uint8_t id = nv50_format_table[format].rt;
if (!nv50->vtxbuf[i].buffer)
continue;
if (nv50->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nv50->base.vbo_dirty = TRUE;
+ nv50->base.vbo_dirty = true;
}
if (nv50->idxbuf.buffer &&
nv50->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nv50->base.vbo_dirty = TRUE;
+ nv50->base.vbo_dirty = true;
for (s = 0; s < 3 && !nv50->cb_dirty; ++s) {
uint32_t valid = nv50->constbuf_valid[s];
continue;
if (res->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nv50->cb_dirty = TRUE;
+ nv50->cb_dirty = true;
}
}
}
if (screen) {
nouveau_fence_next(&screen->base);
- nouveau_fence_update(&screen->base, TRUE);
+ nouveau_fence_update(&screen->base, true);
if (screen->cur_ctx)
- screen->cur_ctx->state.flushed = TRUE;
+ screen->cur_ctx->state.flushed = true;
}
}
nv50->base.invalidate_resource_storage = nv50_invalidate_resource_storage;
if (screen->base.device->chipset < 0x84 ||
- debug_get_bool_option("NOUVEAU_PMPEG", FALSE)) {
+ debug_get_bool_option("NOUVEAU_PMPEG", false)) {
/* PMPEG */
nouveau_context_init_vdec(&nv50->base);
} else if (screen->base.device->chipset < 0x98 ||
}
void
-nv50_bufctx_fence(struct nouveau_bufctx *bufctx, boolean on_flush)
+nv50_bufctx_fence(struct nouveau_bufctx *bufctx, bool on_flush)
{
struct nouveau_list *list = on_flush ? &bufctx->current : &bufctx->pending;
struct nouveau_list *it;
struct nv50_blitctx;
-boolean nv50_blitctx_create(struct nv50_context *);
+bool nv50_blitctx_create(struct nv50_context *);
struct nv50_context {
struct nouveau_context base;
struct nouveau_bufctx *bufctx;
uint32_t dirty;
- boolean cb_dirty;
+ bool cb_dirty;
struct nv50_graph_state state;
unsigned sample_mask;
unsigned min_samples;
- boolean vbo_push_hint;
+ bool vbo_push_hint;
uint32_t rt_array_mode;
struct pipe_query *cond_query;
- boolean cond_cond; /* inverted rendering condition */
+ bool cond_cond; /* inverted rendering condition */
uint cond_mode;
uint32_t cond_condmode; /* the calculated condition */
/* nv50_context.c */
struct pipe_context *nv50_create(struct pipe_screen *, void *);
-void nv50_bufctx_fence(struct nouveau_bufctx *, boolean on_flush);
+void nv50_bufctx_fence(struct nouveau_bufctx *, bool on_flush);
void nv50_default_kick_notify(struct nouveau_pushbuf *);
void nv84_query_fifo_wait(struct nouveau_pushbuf *, struct pipe_query *);
void nva0_so_target_save_offset(struct pipe_context *,
struct pipe_stream_output_target *,
- unsigned index, boolean seralize);
+                          unsigned index, bool serialize);
#define NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET (PIPE_QUERY_TYPES + 0)
/* nv50_state_validate.c */
/* @words: check for space before emitting relocs */
-extern boolean nv50_state_validate(struct nv50_context *, uint32_t state_mask,
- unsigned space_words);
+extern bool nv50_state_validate(struct nv50_context *, uint32_t state_mask,
+ unsigned space_words);
/* nv50_surface.c */
extern void nv50_clear(struct pipe_context *, unsigned buffers,
uint32_t
nv50_tex_choose_tile_dims_helper(unsigned nx, unsigned ny, unsigned nz,
- boolean is_3d)
+ bool is_3d)
{
uint32_t tile_mode = 0x000;
}
static uint32_t
-nv50_tex_choose_tile_dims(unsigned nx, unsigned ny, unsigned nz, boolean is_3d)
+nv50_tex_choose_tile_dims(unsigned nx, unsigned ny, unsigned nz, bool is_3d)
{
return nv50_tex_choose_tile_dims_helper(nx, ny * 2, nz, is_3d);
}
static uint32_t
-nv50_mt_choose_storage_type(struct nv50_miptree *mt, boolean compressed)
+nv50_mt_choose_storage_type(struct nv50_miptree *mt, bool compressed)
{
const unsigned ms = util_logbase2(mt->base.base.nr_samples);
uint32_t tile_flags;
unsigned stride;
if (!mt || !mt->base.bo)
- return FALSE;
+ return false;
stride = mt->level[0].pitch;
u_default_transfer_inline_write /* transfer_inline_write */
};
-static INLINE boolean
+static INLINE bool
nv50_miptree_init_ms_mode(struct nv50_miptree *mt)
{
switch (mt->base.base.nr_samples) {
break;
default:
NOUVEAU_ERR("invalid nr_samples: %u\n", mt->base.base.nr_samples);
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
-boolean
+bool
nv50_miptree_init_layout_linear(struct nv50_miptree *mt, unsigned pitch_align)
{
struct pipe_resource *pt = &mt->base.base;
unsigned h = pt->height0;
if (util_format_is_depth_or_stencil(pt->format))
- return FALSE;
+ return false;
if ((pt->last_level > 0) || (pt->depth0 > 1) || (pt->array_size > 1))
- return FALSE;
+ return false;
if (mt->ms_x | mt->ms_y)
- return FALSE;
+ return false;
mt->level[0].pitch = align(pt->width0 * blocksize, pitch_align);
mt->total_size = mt->level[0].pitch * h;
- return TRUE;
+ return true;
}
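Worked example of the linear-layout math (surface parameters assumed): a
640x480 PIPE_FORMAT_B8G8R8A8_UNORM scanout with pitch_align = 64 gives

   /* pitch      = align(640 * 4, 64) = 2560 bytes
    * total_size = 2560 * 480 = 1228800 bytes (1200 KiB), single level */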
static void
struct nouveau_device *dev = nouveau_screen(pscreen)->device;
struct nv50_miptree *mt = CALLOC_STRUCT(nv50_miptree);
struct pipe_resource *pt = &mt->base.base;
- boolean compressed = dev->drm_version >= 0x01000101;
+ bool compressed = dev->drm_version >= 0x01000101;
int ret;
union nouveau_bo_config bo_config;
uint32_t bo_flags;
prog->vp.bfc[info->out[i].si] = i;
break;
case TGSI_SEMANTIC_LAYER:
- prog->gp.has_layer = TRUE;
+ prog->gp.has_layer = true;
prog->gp.layerid = n;
break;
case TGSI_SEMANTIC_VIEWPORT_INDEX:
return so;
}
-boolean
+bool
nv50_program_translate(struct nv50_program *prog, uint16_t chipset)
{
struct nv50_ir_prog_info *info;
info = CALLOC_STRUCT(nv50_ir_prog_info);
if (!info)
- return FALSE;
+ return false;
info->type = prog->type;
info->target = chipset;
return !ret;
}
-boolean
+bool
nv50_program_upload_code(struct nv50_context *nv50, struct nv50_program *prog)
{
struct nouveau_heap *heap;
case PIPE_SHADER_FRAGMENT: heap = nv50->screen->gp_code_heap; break;
default:
assert(!"invalid program type");
- return FALSE;
+ return false;
}
ret = nouveau_heap_alloc(heap, size, prog, &prog->mem);
ret = nouveau_heap_alloc(heap, size, prog, &prog->mem);
if (ret) {
NOUVEAU_ERR("shader too large (0x%x) to fit in code space ?\n", size);
- return FALSE;
+ return false;
}
}
prog->code_base = prog->mem->start;
ret = nv50_tls_realloc(nv50->screen, prog->tls_space);
if (ret < 0) {
nouveau_heap_free(&prog->mem);
- return FALSE;
+ return false;
}
if (ret > 0)
- nv50->state.new_tls_space = TRUE;
+ nv50->state.new_tls_space = true;
if (prog->fixups)
nv50_ir_relocate_code(prog->fixups, prog->code, prog->code_base, 0, 0);
BEGIN_NV04(nv50->base.pushbuf, NV50_3D(CODE_CB_FLUSH), 1);
PUSH_DATA (nv50->base.pushbuf, 0);
- return TRUE;
+ return true;
}
void
struct pipe_shader_state pipe;
ubyte type;
- boolean translated;
+ bool translated;
uint32_t *code;
unsigned code_size;
struct nv50_stream_output_state *so;
};
-boolean nv50_program_translate(struct nv50_program *, uint16_t chipset);
-boolean nv50_program_upload_code(struct nv50_context *, struct nv50_program *);
+bool nv50_program_translate(struct nv50_program *, uint16_t chipset);
+bool nv50_program_upload_code(struct nv50_context *, struct nv50_program *);
void nv50_program_destroy(struct nv50_context *, struct nv50_program *);
#endif /* __NV50_PROG_H__ */
struct translate *translate;
- boolean primitive_restart;
+ bool primitive_restart;
uint32_t prim;
uint32_t restart_index;
uint32_t instance_id;
unsigned i, index_size;
unsigned inst_count = info->instance_count;
unsigned vert_count = info->count;
- boolean apply_bias = info->indexed && info->index_bias;
+ bool apply_bias = info->indexed && info->index_bias;
ctx.push = nv50->base.pushbuf;
ctx.translate = nv50->vertex->translate;
NOUVEAU_ERR("draw_stream_output not supported on pre-NVA0 cards\n");
return;
}
- pipe->get_query_result(pipe, targ->pq, TRUE, (void *)&vert_count);
+ pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
vert_count /= targ->stride;
}
ctx.idxbuf = NULL;
index_size = 0;
- ctx.primitive_restart = FALSE;
+ ctx.primitive_restart = false;
ctx.restart_index = 0;
}
uint32_t base;
uint32_t offset; /* base + i * 32 */
uint8_t state;
- boolean is64bit;
+ bool is64bit;
int nesting; /* only used for occlusion queries */
struct nouveau_mm_allocation *mm;
struct nouveau_fence *fence;
return (struct nv50_query *)pipe;
}
-static boolean
+static bool
nv50_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size)
{
struct nv50_screen *screen = nv50->screen;
if (size) {
q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
if (!q->bo)
- return FALSE;
+ return false;
q->offset = q->base;
ret = nouveau_bo_map(q->bo, 0, screen->base.client);
if (ret) {
nv50_query_allocate(nv50, q, 0);
- return FALSE;
+ return false;
}
q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base);
}
- return TRUE;
+ return true;
}
static void
struct nv50_query *q = nv50_query(pq);
/* For occlusion queries we have to change the storage, because a previous
- * query might set the initial render conition to FALSE even *after* we re-
- * initialized it to TRUE.
+ * query might set the initial render condition to false even *after* we re-
+ * initialized it to true.
*/
if (q->type == PIPE_QUERY_OCCLUSION_COUNTER) {
q->offset += 32;
* query ?
*/
q->data[0] = q->sequence; /* initialize sequence */
- q->data[1] = 1; /* initial render condition = TRUE */
+ q->data[1] = 1; /* initial render condition = true */
q->data[4] = q->sequence + 1; /* for comparison COND_MODE */
q->data[5] = 0;
}
nv50_query_get(push, q, 0, 0x0d005002 | (q->index << 5));
break;
case PIPE_QUERY_TIMESTAMP_DISJOINT:
- /* This query is not issued on GPU because disjoint is forced to FALSE */
+ /* This query is not issued on GPU because disjoint is forced to false */
q->state = NV50_QUERY_STATE_READY;
break;
default:
struct nv50_query *q = nv50_query(pq);
uint64_t *res64 = (uint64_t *)result;
uint32_t *res32 = (uint32_t *)result;
- boolean *res8 = (boolean *)result;
+ uint8_t *res8 = (uint8_t *)result;
uint64_t *data64 = (uint64_t *)q->data;
int i;
q->state = NV50_QUERY_STATE_FLUSHED;
PUSH_KICK(nv50->base.pushbuf);
}
- return FALSE;
+ return false;
}
if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nv50->screen->base.client))
- return FALSE;
+ return false;
}
q->state = NV50_QUERY_STATE_READY;
switch (q->type) {
case PIPE_QUERY_GPU_FINISHED:
- res8[0] = TRUE;
+ res8[0] = true;
break;
case PIPE_QUERY_OCCLUSION_COUNTER: /* u32 sequence, u32 count, u64 time */
res64[0] = q->data[1] - q->data[5];
break;
case PIPE_QUERY_TIMESTAMP_DISJOINT:
res64[0] = 1000000000;
- res8[8] = FALSE;
+ res8[8] = false;
break;
case PIPE_QUERY_TIME_ELAPSED:
res64[0] = data64[1] - data64[3];
res32[0] = q->data[1];
break;
default:
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
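The byte-sized res8 pointer is why the cast above switches to uint8_t rather
than bool: the timestamp-disjoint result is a 64-bit frequency followed by a
flag at byte offset 8, matching the res64[0]/res8[8] writes.

   /* layout implied by the writes above (sketch):
    * struct { uint64_t frequency;    res64[0] = 1000000000 (Hz)
    *          uint8_t  disjoint; }   res8[8]  = false              */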
void
struct nouveau_pushbuf *push = nv50->base.pushbuf;
struct nv50_query *q;
uint32_t cond;
- boolean wait =
+ bool wait =
mode != PIPE_RENDER_COND_NO_WAIT &&
mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
cond = condition ? NV50_3D_COND_MODE_EQUAL :
NV50_3D_COND_MODE_NOT_EQUAL;
- wait = TRUE;
+ wait = true;
break;
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
void
nva0_so_target_save_offset(struct pipe_context *pipe,
struct pipe_stream_output_target *ptarg,
- unsigned index, boolean serialize)
+ unsigned index, bool serialize)
{
struct nv50_so_target *targ = nv50_so_target(ptarg);
uint32_t
nv50_tex_choose_tile_dims_helper(unsigned nx, unsigned ny, unsigned nz,
- boolean is_3d);
+ bool is_3d);
struct nv50_miptree_level {
uint32_t offset;
struct nv50_miptree_level level[NV50_MAX_TEXTURE_LEVELS];
uint32_t total_size;
uint32_t layer_stride;
- boolean layout_3d; /* TRUE if layer count varies with mip level */
+ bool layout_3d; /* true if layer count varies with mip level */
uint8_t ms_x; /* log2 of number of samples in x/y dimension */
uint8_t ms_y;
uint8_t ms_mode;
/* Internal functions:
*/
-boolean
+bool
nv50_miptree_init_layout_linear(struct nv50_miptree *mt, unsigned pitch_align);
struct pipe_resource *
unsigned bindings)
{
if (sample_count > 8)
- return FALSE;
+ return false;
if (!(0x117 & (1 << sample_count))) /* 0, 1, 2, 4 or 8 */
- return FALSE;
+ return false;
if (sample_count == 8 && util_format_get_blocksizebits(format) >= 128)
- return FALSE;
+ return false;
if (!util_format_is_supported(format, bindings))
- return FALSE;
+ return false;
switch (format) {
case PIPE_FORMAT_Z16_UNORM:
if (nv50_screen(pscreen)->tesla->oclass < NVA0_3D_CLASS)
- return FALSE;
+ return false;
break;
default:
break;
BEGIN_NV04(push, NV50_3D(UNK1400_LANES), 1);
PUSH_DATA (push, 0xf);
- if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", TRUE)) {
+ if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", true)) {
BEGIN_NV04(push, NV50_3D(WATCHDOG_TIMER), 1);
PUSH_DATA (push, 0x18);
}
nv50_screen_init_resource_functions(pscreen);
if (screen->base.device->chipset < 0x84 ||
- debug_get_bool_option("NOUVEAU_PMPEG", FALSE)) {
+ debug_get_bool_option("NOUVEAU_PMPEG", false)) {
/* PMPEG */
nouveau_screen_init_vdec(&screen->base);
} else if (screen->base.device->chipset < 0x98 ||
nv50_screen_init_hwctx(screen);
- nouveau_fence_new(&screen->base, &screen->base.fence.current, FALSE);
+ nouveau_fence_new(&screen->base, &screen->base.fence.current, false);
return pscreen;
uint32_t semantic_color;
uint32_t semantic_psize;
int32_t index_bias;
- boolean uniform_buffer_bound[3];
- boolean prim_restart;
- boolean point_sprite;
- boolean rt_serialize;
- boolean flushed;
- boolean rasterizer_discard;
+ bool uniform_buffer_bound[3];
+ bool prim_restart;
+ bool point_sprite;
+ bool rt_serialize;
+ bool flushed;
+ bool rasterizer_discard;
uint8_t tls_required;
- boolean new_tls_space;
+ bool new_tls_space;
uint8_t num_vtxbufs;
uint8_t num_vtxelts;
uint8_t num_textures[3];
return (struct nv50_screen *)screen;
}
-boolean nv50_blitter_create(struct nv50_screen *);
+bool nv50_blitter_create(struct nv50_screen *);
void nv50_blitter_destroy(struct nv50_screen *);
int nv50_screen_tic_alloc(struct nv50_screen *, void *);
continue;
}
if (!nv50->state.uniform_buffer_bound[s]) {
- nv50->state.uniform_buffer_bound[s] = TRUE;
+ nv50->state.uniform_buffer_bound[s] = true;
BEGIN_NV04(push, NV50_3D(SET_PROGRAM_CB), 1);
PUSH_DATA (push, (b << 12) | (i << 8) | p | 1);
}
PUSH_DATA (push, (i << 8) | p | 0);
}
if (i == 0)
- nv50->state.uniform_buffer_bound[s] = FALSE;
+ nv50->state.uniform_buffer_bound[s] = false;
}
}
}
}
-static boolean
+static bool
nv50_program_validate(struct nv50_context *nv50, struct nv50_program *prog)
{
if (!prog->translated) {
prog->translated = nv50_program_translate(
prog, nv50->screen->base.device->chipset);
if (!prog->translated)
- return FALSE;
+ return false;
} else
if (prog->mem)
- return TRUE;
+ return true;
return nv50_program_upload_code(nv50, prog);
}
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TLS);
if (!nv50->state.tls_required || nv50->state.new_tls_space)
BCTX_REFN_bo(nv50->bufctx_3d, TLS, flags, nv50->screen->tls_bo);
- nv50->state.new_tls_space = FALSE;
+ nv50->state.new_tls_space = false;
nv50->state.tls_required |= 1 << stage;
} else {
if (nv50->state.tls_required == (1 << stage))
for (i = 0; i < 8; ++i)
PUSH_DATA(push, 0);
- nv50->state.point_sprite = FALSE;
+ nv50->state.point_sprite = false;
}
return;
} else {
- nv50->state.point_sprite = TRUE;
+ nv50->state.point_sprite = true;
}
memset(pntc, 0, sizeof(pntc));
nv50_query_pushbuf_submit(push, targ->pq, 0x4);
} else {
PUSH_DATA(push, 0);
- targ->clean = FALSE;
+ targ->clean = false;
}
} else {
const unsigned limit = targ->pipe.buffer_size /
{
struct nv50_blend_stateobj *so = CALLOC_STRUCT(nv50_blend_stateobj);
int i;
- boolean emit_common_func = cso->rt[0].blend_enable;
+ bool emit_common_func = cso->rt[0].blend_enable;
uint32_t ms;
if (nv50_context(pipe)->screen->tesla->oclass >= NVA3_3D_CLASS) {
for (i = 0; i < 8; ++i) {
SB_DATA(so, cso->rt[i].blend_enable);
if (cso->rt[i].blend_enable)
- emit_common_func = TRUE;
+ emit_common_func = true;
}
if (nv50_context(pipe)->screen->tesla->oclass >= NVA3_3D_CLASS) {
- emit_common_func = FALSE;
+ emit_common_func = false;
for (i = 0; i < 8; ++i) {
if (!cso->rt[i].blend_enable)
pipe_resource_reference(&nv50->constbuf[s][i].u.buf, res);
- nv50->constbuf[s][i].user = (cb && cb->user_buffer) ? TRUE : FALSE;
+   nv50->constbuf[s][i].user = cb && cb->user_buffer;
if (nv50->constbuf[s][i].user) {
nv50->constbuf[s][i].u.data = cb->user_buffer;
nv50->constbuf[s][i].size = MIN2(cb->buffer_size, 0x10000);
} else {
targ->pq = NULL;
}
- targ->clean = TRUE;
+ targ->clean = true;
targ->pipe.buffer_size = size;
targ->pipe.buffer_offset = offset;
{
struct nv50_context *nv50 = nv50_context(pipe);
unsigned i;
- boolean serialize = TRUE;
- const boolean can_resume = nv50->screen->base.class_3d >= NVA0_3D_CLASS;
+ bool serialize = true;
+ const bool can_resume = nv50->screen->base.class_3d >= NVA0_3D_CLASS;
assert(num_targets <= 4);
for (i = 0; i < num_targets; ++i) {
- const boolean changed = nv50->so_target[i] != targets[i];
- const boolean append = (offsets[i] == (unsigned)-1);
+ const bool changed = nv50->so_target[i] != targets[i];
+ const bool append = (offsets[i] == (unsigned)-1);
if (!changed && append)
continue;
nv50->so_targets_dirty |= 1 << i;
if (can_resume && changed && nv50->so_target[i]) {
nva0_so_target_save_offset(pipe, nv50->so_target[i], i, serialize);
- serialize = FALSE;
+ serialize = false;
}
if (targets[i] && !append)
- nv50_so_target(targets[i])->clean = TRUE;
+ nv50_so_target(targets[i])->clean = true;
pipe_so_target_reference(&nv50->so_target[i], targets[i]);
}
for (; i < nv50->num_so_targets; ++i) {
if (can_resume && nv50->so_target[i]) {
nva0_so_target_save_offset(pipe, nv50->so_target[i], i, serialize);
- serialize = FALSE;
+ serialize = false;
}
pipe_so_target_reference(&nv50->so_target[i], NULL);
nv50->so_targets_dirty |= 1 << i;
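Two conventions are in play here: an offset of (unsigned)-1 means append, i.e. resume from the target's saved offset instead of restarting at zero, and one SERIALIZE per batch is enough, so serialize drops to false after the first nva0_so_target_save_offset() call. A hedged sketch of requesting append mode through the standard gallium hook:

/* assumption: stock pipe_context::set_stream_output_targets semantics */
static const unsigned append_offsets[4] = { ~0u, ~0u, ~0u, ~0u };
pipe->set_stream_output_targets(pipe, num_targets, targets, append_offsets);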
ms_mode = mt->ms_mode;
if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING)
- nv50->state.rt_serialize = TRUE;
+ nv50->state.rt_serialize = true;
mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;
ms_mode = mt->ms_mode;
if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING)
- nv50->state.rt_serialize = TRUE;
+ nv50->state.rt_serialize = true;
mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;
};
#define validate_list_len (sizeof(validate_list) / sizeof(validate_list[0]))
-boolean
+bool
nv50_state_validate(struct nv50_context *nv50, uint32_t mask, unsigned words)
{
uint32_t state_mask;
nv50->dirty &= ~state_mask;
if (nv50->state.rt_serialize) {
- nv50->state.rt_serialize = FALSE;
+ nv50->state.rt_serialize = false;
BEGIN_NV04(nv50->base.pushbuf, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
PUSH_DATA (nv50->base.pushbuf, 0);
}
- nv50_bufctx_fence(nv50->bufctx_3d, FALSE);
+ nv50_bufctx_fence(nv50->bufctx_3d, false);
}
nouveau_pushbuf_bufctx(nv50->base.pushbuf, nv50->bufctx_3d);
ret = nouveau_pushbuf_validate(nv50->base.pushbuf);
if (unlikely(nv50->state.flushed)) {
- nv50->state.flushed = FALSE;
- nv50_bufctx_fence(nv50->bufctx_3d, TRUE);
+ nv50->state.flushed = false;
+ nv50_bufctx_fence(nv50->bufctx_3d, true);
}
return !ret;
}
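The return value is the pushbuf validation result, so draw paths bail out when it fails. A minimal usage sketch, assuming the mask/word values of a typical draw_vbo caller:

if (!nv50_state_validate(nv50, ~0, 64 /* reserved push words */))
   return; /* pushbuf validation failed; drop the draw */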
} u;
uint32_t size; /* max 65536 */
uint32_t offset;
- boolean user; /* should only be TRUE if u.data is valid and non-NULL */
+ bool user; /* should only be true if u.data is valid and non-NULL */
};
struct nv50_vertex_element {
unsigned num_elements;
uint32_t instance_elts;
uint32_t instance_bufs;
- boolean need_conversion;
+ bool need_conversion;
unsigned vertex_size;
unsigned packet_vertex_limit;
struct nv50_vertex_element element[0];
struct pipe_stream_output_target pipe;
struct pipe_query *pq;
unsigned stride;
- boolean clean;
+ bool clean;
};
static INLINE struct nv50_so_target *
#include "nv50/nv50_blit.h"
static INLINE uint8_t
-nv50_2d_format(enum pipe_format format, boolean dst, boolean dst_src_equal)
+nv50_2d_format(enum pipe_format format, bool dst, bool dst_src_equal)
{
uint8_t id = nv50_format_table[format].rt;
static int
nv50_2d_texture_set(struct nouveau_pushbuf *push, int dst,
struct nv50_miptree *mt, unsigned level, unsigned layer,
- enum pipe_format pformat, boolean dst_src_pformat_equal)
+ enum pipe_format pformat, bool dst_src_pformat_equal)
{
struct nouveau_bo *bo = mt->base.bo;
uint32_t width, height, depth;
const enum pipe_format dfmt = dst->base.base.format;
const enum pipe_format sfmt = src->base.base.format;
int ret;
- boolean eqfmt = dfmt == sfmt;
+ bool eqfmt = dfmt == sfmt;
if (!PUSH_SPACE(push, 2 * 16 + 32))
return PIPE_ERROR;
{
struct nv50_context *nv50 = nv50_context(pipe);
int ret;
- boolean m2mf;
+ bool m2mf;
unsigned dst_layer = dstz, src_layer = src_box->z;
if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
};
blit->vp.type = PIPE_SHADER_VERTEX;
- blit->vp.translated = TRUE;
+ blit->vp.translated = true;
blit->vp.code = (uint32_t *)code; /* const_cast */
blit->vp.code_size = sizeof(code);
blit->vp.max_gpr = 4;
const unsigned target = nv50_blit_get_tgsi_texture_target(ptarg);
- boolean tex_rgbaz = FALSE;
- boolean tex_s = FALSE;
- boolean cvt_un8 = FALSE;
+ bool tex_rgbaz = false;
+ bool tex_s = false;
+ bool cvt_un8 = false;
if (mode != NV50_BLIT_MODE_PASS &&
mode != NV50_BLIT_MODE_Z24X8 &&
mode != NV50_BLIT_MODE_X8Z24)
- tex_s = TRUE;
+ tex_s = true;
if (mode != NV50_BLIT_MODE_X24S8 &&
mode != NV50_BLIT_MODE_S8X24 &&
mode != NV50_BLIT_MODE_XS)
- tex_rgbaz = TRUE;
+ tex_rgbaz = true;
if (mode != NV50_BLIT_MODE_PASS &&
mode != NV50_BLIT_MODE_ZS &&
mode != NV50_BLIT_MODE_XS)
- cvt_un8 = TRUE;
+ cvt_un8 = true;
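For the modes named in the three tests above, the flags resolve as follows (derived directly from the conditions; any mode not listed would get all three set):

/*
 * mode    tex_s  tex_rgbaz  cvt_un8
 * PASS    no     yes        no
 * Z24X8   no     yes        yes
 * X8Z24   no     yes        yes
 * X24S8   yes    no         yes
 * S8X24   yes    no         yes
 * ZS      yes    yes        no
 * XS      yes    no         no
 */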
ureg = ureg_create(TGSI_PROCESSOR_FRAGMENT);
if (!ureg)
int i;
uint32_t mode;
uint32_t mask = nv50_blit_eng2d_get_mask(info);
- boolean b;
+ bool b;
mode = nv50_blit_get_filter(info) ?
NV50_2D_BLIT_CONTROL_FILTER_BILINEAR :
PUSH_DATA (push, srcy >> 32);
}
}
- nv50_bufctx_fence(nv50->bufctx, FALSE);
+ nv50_bufctx_fence(nv50->bufctx, false);
nouveau_bufctx_reset(nv50->bufctx, NV50_BIND_2D);
{
struct nv50_context *nv50 = nv50_context(pipe);
struct nouveau_pushbuf *push = nv50->base.pushbuf;
- boolean eng3d = FALSE;
+ bool eng3d = false;
if (util_format_is_depth_or_stencil(info->dst.resource->format)) {
if (!(info->mask & PIPE_MASK_ZS))
return;
if (info->dst.resource->format == PIPE_FORMAT_Z32_FLOAT ||
info->dst.resource->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT)
- eng3d = TRUE;
+ eng3d = true;
if (info->filter != PIPE_TEX_FILTER_NEAREST)
- eng3d = TRUE;
+ eng3d = true;
} else {
if (!(info->mask & PIPE_MASK_RGBA))
return;
if (info->mask != PIPE_MASK_RGBA)
- eng3d = TRUE;
+ eng3d = true;
}
if (nv50_miptree(info->src.resource)->layout_3d) {
- eng3d = TRUE;
+ eng3d = true;
} else
if (info->src.box.depth != info->dst.box.depth) {
- eng3d = TRUE;
+ eng3d = true;
debug_printf("blit: cannot filter array or cube textures in z direction");
}
if (!eng3d && info->dst.format != info->src.format) {
if (!nv50_2d_dst_format_faithful(info->dst.format)) {
- eng3d = TRUE;
+ eng3d = true;
} else
if (!nv50_2d_src_format_faithful(info->src.format)) {
if (!util_format_is_luminance(info->src.format)) {
if (util_format_is_intensity(info->src.format))
- eng3d = TRUE;
+ eng3d = true;
else
if (!nv50_2d_dst_format_ops_supported(info->dst.format))
- eng3d = TRUE;
+ eng3d = true;
else
eng3d = !nv50_2d_format_supported(info->src.format);
}
} else
if (util_format_is_luminance_alpha(info->src.format))
- eng3d = TRUE;
+ eng3d = true;
}
if (info->src.resource->nr_samples == 8 &&
info->dst.resource->nr_samples <= 1)
- eng3d = TRUE;
+ eng3d = true;
/* FIXME: can't make this work with eng2d anymore */
if ((info->src.resource->nr_samples | 1) !=
(info->dst.resource->nr_samples | 1))
- eng3d = TRUE;
+ eng3d = true;
/* FIXME: find correct src coordinate adjustments */
if ((info->src.box.width != info->dst.box.width &&
info->src.box.width != -info->dst.box.width) ||
(info->src.box.height != info->dst.box.height &&
info->src.box.height != -info->dst.box.height))
- eng3d = TRUE;
+ eng3d = true;
if (nv50->screen->num_occlusion_queries_active) {
BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
{
}
-boolean
+bool
nv50_blitter_create(struct nv50_screen *screen)
{
screen->blitter = CALLOC_STRUCT(nv50_blitter);
if (!screen->blitter) {
NOUVEAU_ERR("failed to allocate blitter struct\n");
- return FALSE;
+ return false;
}
pipe_mutex_init(screen->blitter->mutex);
nv50_blitter_make_vp(screen->blitter);
nv50_blitter_make_sampler(screen->blitter);
- return TRUE;
+ return true;
}
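A hedged sketch of the call site: the blitter is created once at screen initialization and a failure aborts screen setup (mirroring the nvc0 path shown later):

if (!nv50_blitter_create(screen))
   goto fail; /* allocation failed; screen init unwinds */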
void
FREE(blitter);
}
-boolean
+bool
nv50_blitctx_create(struct nv50_context *nv50)
{
nv50->blit = CALLOC_STRUCT(nv50_blitctx);
if (!nv50->blit) {
NOUVEAU_ERR("failed to allocate blit context\n");
- return FALSE;
+ return false;
}
nv50->blit->nv50 = nv50;
nv50->blit->rast.pipe.half_pixel_center = 1;
- return TRUE;
+ return true;
}
void
NV50_TIC_0_MAPG__MASK | NV50_TIC_0_MAPR__MASK)
static INLINE uint32_t
-nv50_tic_swizzle(uint32_t tc, unsigned swz, boolean tex_int)
+nv50_tic_swizzle(uint32_t tc, unsigned swz, bool tex_int)
{
switch (swz) {
case PIPE_SWIZZLE_RED:
uint32_t depth;
struct nv50_tic_entry *view;
struct nv50_miptree *mt = nv50_miptree(texture);
- boolean tex_int;
+ bool tex_int;
view = MALLOC_STRUCT(nv50_tic_entry);
if (!view)
break;
default:
NOUVEAU_ERR("invalid texture target: %d\n", mt->base.base.target);
- return FALSE;
+ return false;
}
tic[3] = (flags & NV50_TEXVIEW_FILTER_MSAA8) ? 0x20000000 : 0x00300000;
return &view->pipe;
}
-static boolean
+static bool
nv50_validate_tic(struct nv50_context *nv50, int s)
{
struct nouveau_pushbuf *push = nv50->base.pushbuf;
struct nouveau_bo *txc = nv50->screen->txc;
unsigned i;
- boolean need_flush = FALSE;
+ bool need_flush = false;
assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
for (i = 0; i < nv50->num_textures[s]; ++i) {
BEGIN_NI04(push, NV50_2D(SIFC_DATA), 8);
PUSH_DATAp(push, &tic->tic[0], 8);
- need_flush = TRUE;
+ need_flush = true;
} else
if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
BEGIN_NV04(push, NV50_3D(TEX_CACHE_CTL), 1);
void nv50_validate_textures(struct nv50_context *nv50)
{
- boolean need_flush;
+ bool need_flush;
need_flush = nv50_validate_tic(nv50, 0);
need_flush |= nv50_validate_tic(nv50, 1);
}
}
-static boolean
+static bool
nv50_validate_tsc(struct nv50_context *nv50, int s)
{
struct nouveau_pushbuf *push = nv50->base.pushbuf;
unsigned i;
- boolean need_flush = FALSE;
+ bool need_flush = false;
assert(nv50->num_samplers[s] <= PIPE_MAX_SAMPLERS);
for (i = 0; i < nv50->num_samplers[s]; ++i) {
nv50_sifc_linear_u8(&nv50->base, nv50->screen->txc,
65536 + tsc->id * 32,
NOUVEAU_BO_VRAM, 32, tsc->tsc);
- need_flush = TRUE;
+ need_flush = true;
}
nv50->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);
void nv50_validate_samplers(struct nv50_context *nv50)
{
- boolean need_flush;
+ bool need_flush;
need_flush = nv50_validate_tsc(nv50, 0);
need_flush |= nv50_validate_tsc(nv50, 1);
so->num_elements = num_elements;
so->instance_elts = 0;
so->instance_bufs = 0;
- so->need_conversion = FALSE;
+ so->need_conversion = false;
memset(so->vb_access_size, 0, sizeof(so->vb_access_size));
return NULL;
}
so->element[i].state = nv50_format_table[fmt].vtx;
- so->need_conversion = TRUE;
+ so->need_conversion = true;
}
so->element[i].state |= i;
BCTX_REFN_bo(nv50->bufctx_3d, VERTEX_TMP, NOUVEAU_BO_GART |
NOUVEAU_BO_RD, bo);
}
- nv50->base.vbo_dirty = TRUE;
+ nv50->base.vbo_dirty = true;
}
static void
PUSH_DATAh(push, address[b] + ve->src_offset);
PUSH_DATA (push, address[b] + ve->src_offset);
}
- nv50->base.vbo_dirty = TRUE;
+ nv50->base.vbo_dirty = true;
}
static INLINE void
struct nv04_resource *buf = nv04_resource(nv50->vtxbuf[i].buffer);
if (buf && buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
- nv50->base.vbo_dirty = TRUE;
+ nv50->base.vbo_dirty = true;
break;
}
}
}
static void
-nv50_draw_elements(struct nv50_context *nv50, boolean shorten,
+nv50_draw_elements(struct nv50_context *nv50, bool shorten,
unsigned mode, unsigned start, unsigned count,
unsigned instance_count, int32_t index_bias)
{
{
struct nv50_screen *screen = chan->user_priv;
- nouveau_fence_update(&screen->base, TRUE);
+ nouveau_fence_update(&screen->base, true);
- nv50_bufctx_fence(screen->cur_ctx->bufctx_3d, TRUE);
+ nv50_bufctx_fence(screen->cur_ctx->bufctx_3d, true);
}
void
continue;
if (res->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
- nv50->cb_dirty = TRUE;
+ nv50->cb_dirty = true;
}
}
if (nv50->cb_dirty) {
BEGIN_NV04(push, NV50_3D(CODE_CB_FLUSH), 1);
PUSH_DATA (push, 0);
- nv50->cb_dirty = FALSE;
+ nv50->cb_dirty = false;
}
if (nv50->vbo_fifo) {
if (!nv50->vtxbuf[i].buffer)
continue;
if (nv50->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
- nv50->base.vbo_dirty = TRUE;
+ nv50->base.vbo_dirty = true;
}
if (!nv50->base.vbo_dirty && nv50->idxbuf.buffer &&
nv50->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
- nv50->base.vbo_dirty = TRUE;
+ nv50->base.vbo_dirty = true;
if (nv50->base.vbo_dirty) {
BEGIN_NV04(push, NV50_3D(VERTEX_ARRAY_FLUSH), 1);
PUSH_DATA (push, 0);
- nv50->base.vbo_dirty = FALSE;
+ nv50->base.vbo_dirty = false;
}
if (info->indexed) {
- boolean shorten = info->max_index <= 65535;
+ bool shorten = info->max_index <= 65535;
if (info->primitive_restart != nv50->state.prim_restart) {
if (info->primitive_restart) {
PUSH_DATA (push, info->restart_index);
if (info->restart_index > 65535)
- shorten = FALSE;
+ shorten = false;
} else {
BEGIN_NV04(push, NV50_3D(PRIM_RESTART_ENABLE), 1);
PUSH_DATA (push, 0);
PUSH_DATA (push, info->restart_index);
if (info->restart_index > 65535)
- shorten = FALSE;
+ shorten = false;
}
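shorten selects the 16-bit index path, so it must be dropped whenever any value the hardware sees, including the restart index, exceeds 65535. In effect (a hedged restatement of the branches above):

bool shorten = info->max_index <= 65535 &&
               (!info->primitive_restart || info->restart_index <= 65535);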
nv50_draw_elements(nv50, shorten,
return 0;
}
-boolean
+bool
nvc0_compute_validate_program(struct nvc0_context *nvc0)
{
struct nvc0_program *prog = nvc0->compprog;
if (prog->mem)
- return TRUE;
+ return true;
if (!prog->translated) {
prog->translated = nvc0_program_translate(
prog, nvc0->screen->base.device->chipset);
if (!prog->translated)
- return FALSE;
+ return false;
}
if (unlikely(!prog->code_size))
- return FALSE;
+ return false;
if (likely(prog->code_size)) {
if (nvc0_program_upload_code(nvc0, prog)) {
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
BEGIN_NVC0(push, NVC0_COMPUTE(FLUSH), 1);
PUSH_DATA (push, NVC0_COMPUTE_FLUSH_CODE);
- return TRUE;
+ return true;
}
}
- return FALSE;
+ return false;
}
-static boolean
+static bool
nvc0_compute_state_validate(struct nvc0_context *nvc0)
{
if (!nvc0_compute_validate_program(nvc0))
- return FALSE;
+ return false;
/* TODO: textures, samplers, surfaces, global memory buffers */
- nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, FALSE);
+ nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, false);
nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx_cp);
if (unlikely(nouveau_pushbuf_validate(nvc0->base.pushbuf)))
- return FALSE;
+ return false;
if (unlikely(nvc0->state.flushed))
- nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, TRUE);
+ nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, true);
- return TRUE;
+ return true;
}
#include "nv50/nv50_defs.xml.h"
#include "nvc0/nvc0_compute.xml.h"
-boolean
+bool
nvc0_compute_validate_program(struct nvc0_context *nvc0);
#endif /* NVC0_COMPUTE_H */
if (!nvc0->vtxbuf[i].buffer)
continue;
if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nvc0->base.vbo_dirty = TRUE;
+ nvc0->base.vbo_dirty = true;
}
if (nvc0->idxbuf.buffer &&
nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nvc0->base.vbo_dirty = TRUE;
+ nvc0->base.vbo_dirty = true;
for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
uint32_t valid = nvc0->constbuf_valid[s];
continue;
if (res->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nvc0->cb_dirty = TRUE;
+ nvc0->cb_dirty = true;
}
}
}
if (screen) {
nouveau_fence_next(&screen->base);
- nouveau_fence_update(&screen->base, TRUE);
+ nouveau_fence_update(&screen->base, true);
if (screen->cur_ctx)
- screen->cur_ctx->state.flushed = TRUE;
+ screen->cur_ctx->state.flushed = true;
NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1);
}
}
void
nvc0_bufctx_fence(struct nvc0_context *nvc0, struct nouveau_bufctx *bufctx,
- boolean on_flush)
+ bool on_flush)
{
struct nouveau_list *list = on_flush ? &bufctx->current : &bufctx->pending;
struct nouveau_list *it;
struct nvc0_blitctx;
-boolean nvc0_blitctx_create(struct nvc0_context *);
+bool nvc0_blitctx_create(struct nvc0_context *);
void nvc0_blitctx_destroy(struct nvc0_context *);
struct nvc0_context {
struct nvc0_constbuf constbuf[6][NVC0_MAX_PIPE_CONSTBUFS];
uint16_t constbuf_dirty[6];
uint16_t constbuf_valid[6];
- boolean cb_dirty;
+ bool cb_dirty;
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
unsigned num_vtxbufs;
unsigned sample_mask;
unsigned min_samples;
- boolean vbo_push_hint;
+ bool vbo_push_hint;
uint8_t tfbbuf_dirty;
struct pipe_stream_output_target *tfbbuf[4];
unsigned num_tfbbufs;
struct pipe_query *cond_query;
- boolean cond_cond; /* inverted rendering condition */
+ bool cond_cond; /* inverted rendering condition */
uint cond_mode;
uint32_t cond_condmode; /* the calculated condition */
/* nvc0_context.c */
struct pipe_context *nvc0_create(struct pipe_screen *, void *);
void nvc0_bufctx_fence(struct nvc0_context *, struct nouveau_bufctx *,
- boolean on_flush);
+ bool on_flush);
void nvc0_default_kick_notify(struct nouveau_pushbuf *);
/* nvc0_draw.c */
extern struct draw_stage *nvc0_draw_render_stage(struct nvc0_context *);
/* nvc0_program.c */
-boolean nvc0_program_translate(struct nvc0_program *, uint16_t chipset);
-boolean nvc0_program_upload_code(struct nvc0_context *, struct nvc0_program *);
+bool nvc0_program_translate(struct nvc0_program *, uint16_t chipset);
+bool nvc0_program_upload_code(struct nvc0_context *, struct nvc0_program *);
void nvc0_program_destroy(struct nvc0_context *, struct nvc0_program *);
void nvc0_program_library_upload(struct nvc0_context *);
uint32_t nvc0_program_symbol_offset(const struct nvc0_program *,
void nvc0_query_fifo_wait(struct nouveau_pushbuf *, struct pipe_query *);
void nvc0_so_target_save_offset(struct pipe_context *,
struct pipe_stream_output_target *, unsigned i,
- boolean *serialize);
+ bool *serialize);
#define NVC0_QUERY_TFB_BUFFER_OFFSET (PIPE_QUERY_TYPES + 0)
/* nvc0_state_validate.c */
void nvc0_validate_global_residents(struct nvc0_context *,
struct nouveau_bufctx *, int bin);
-extern boolean nvc0_state_validate(struct nvc0_context *, uint32_t state_mask,
- unsigned space_words);
+extern bool nvc0_state_validate(struct nvc0_context *, uint32_t state_mask,
+ unsigned space_words);
/* nvc0_surface.c */
extern void nvc0_clear(struct pipe_context *, unsigned buffers,
extern void nvc0_init_surface_functions(struct nvc0_context *);
/* nvc0_tex.c */
-boolean nve4_validate_tsc(struct nvc0_context *nvc0, int s);
+bool nve4_validate_tsc(struct nvc0_context *nvc0, int s);
void nvc0_validate_textures(struct nvc0_context *);
void nvc0_validate_samplers(struct nvc0_context *);
void nve4_set_tex_handles(struct nvc0_context *);
#include "nvc0/nvc0_resource.h"
static uint32_t
-nvc0_tex_choose_tile_dims(unsigned nx, unsigned ny, unsigned nz, boolean is_3d)
+nvc0_tex_choose_tile_dims(unsigned nx, unsigned ny, unsigned nz, bool is_3d)
{
return nv50_tex_choose_tile_dims_helper(nx, ny, nz, is_3d);
}
static uint32_t
-nvc0_mt_choose_storage_type(struct nv50_miptree *mt, boolean compressed)
+nvc0_mt_choose_storage_type(struct nv50_miptree *mt, bool compressed)
{
const unsigned ms = util_logbase2(mt->base.base.nr_samples);
return tile_flags;
}
-static INLINE boolean
+static INLINE bool
nvc0_miptree_init_ms_mode(struct nv50_miptree *mt)
{
switch (mt->base.base.nr_samples) {
break;
default:
NOUVEAU_ERR("invalid nr_samples: %u\n", mt->base.base.nr_samples);
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
static void
struct nouveau_device *dev = nouveau_screen(pscreen)->device;
struct nv50_miptree *mt = CALLOC_STRUCT(nv50_miptree);
struct pipe_resource *pt = &mt->base.base;
- boolean compressed = dev->drm_version >= 0x01000101;
+ bool compressed = dev->drm_version >= 0x01000101;
int ret;
union nouveau_bo_config bo_config;
uint32_t bo_flags;
}
#endif
-boolean
+bool
nvc0_program_translate(struct nvc0_program *prog, uint16_t chipset)
{
struct nv50_ir_prog_info *info;
info = CALLOC_STRUCT(nv50_ir_prog_info);
if (!info)
- return FALSE;
+ return false;
info->type = prog->type;
info->target = chipset;
assert(info->bin.tlsSpace < (1 << 24));
prog->hdr[0] |= 1 << 26;
prog->hdr[1] |= align(info->bin.tlsSpace, 0x10); /* l[] size */
- prog->need_tls = TRUE;
+ prog->need_tls = true;
}
/* TODO: factor 2 only needed where joinat/precont is used,
* and we only have to count non-uniform branches
/*
if ((info->maxCFDepth * 2) > 16) {
prog->hdr[2] |= (((info->maxCFDepth * 2) + 47) / 48) * 0x200;
- prog->need_tls = TRUE;
+ prog->need_tls = true;
}
*/
if (info->io.globalAccess)
return !ret;
}
-boolean
+bool
nvc0_program_upload_code(struct nvc0_context *nvc0, struct nvc0_program *prog)
{
struct nvc0_screen *screen = nvc0->screen;
- const boolean is_cp = prog->type == PIPE_SHADER_COMPUTE;
+ const bool is_cp = prog->type == PIPE_SHADER_COMPUTE;
int ret;
uint32_t size = prog->code_size + (is_cp ? 0 : NVC0_SHADER_HEADER_SIZE);
uint32_t lib_pos = screen->lib_code->start;
ret = nouveau_heap_alloc(heap, size, prog, &prog->mem);
if (ret) {
NOUVEAU_ERR("shader too large (0x%x) to fit in code space ?\n", size);
- return FALSE;
+ return false;
}
IMMED_NVC0(nvc0->base.pushbuf, NVC0_3D(SERIALIZE), 0);
}
nv50_ir_relocate_code(prog->relocs, prog->code, code_pos, lib_pos, 0);
#ifdef DEBUG
- if (debug_get_bool_option("NV50_PROG_DEBUG", FALSE))
+ if (debug_get_bool_option("NV50_PROG_DEBUG", false))
nvc0_program_dump(prog);
#endif
BEGIN_NVC0(nvc0->base.pushbuf, NVC0_3D(MEM_BARRIER), 1);
PUSH_DATA (nvc0->base.pushbuf, 0x1011);
- return TRUE;
+ return true;
}
/* Upload code for builtin functions like integer division emulation. */
struct pipe_shader_state pipe;
ubyte type;
- boolean translated;
- boolean need_tls;
+ bool translated;
+ bool need_tls;
uint8_t num_gprs;
uint32_t *code;
uint8_t clip_enable; /* mask of defined clip planes */
uint8_t num_ucps; /* also set to max if ClipDistance is used */
uint8_t edgeflag; /* attribute index of edgeflag input */
- boolean need_vertex_id;
+ bool need_vertex_id;
} vp;
struct {
uint8_t early_z;
uint32_t base;
uint32_t offset; /* base + i * rotate */
uint8_t state;
- boolean is64bit;
+ bool is64bit;
uint8_t rotate;
int nesting; /* only used for occlusion queries */
union {
return (struct nvc0_query *)pipe;
}
-static boolean
+static bool
nvc0_query_allocate(struct nvc0_context *nvc0, struct nvc0_query *q, int size)
{
struct nvc0_screen *screen = nvc0->screen;
if (size) {
q->u.mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
if (!q->bo)
- return FALSE;
+ return false;
q->offset = q->base;
ret = nouveau_bo_map(q->bo, 0, screen->base.client);
if (ret) {
nvc0_query_allocate(nvc0, q, 0);
- return FALSE;
+ return false;
}
q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base);
}
- return TRUE;
+ return true;
}
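nvc0_query_allocate() carves a slice out of a GART-backed slab, maps the whole BO once, and points q->data at the slice; calling it again with size 0 releases the slice. A hedged lifetime sketch:

if (nvc0_query_allocate(nvc0, q, NVC0_QUERY_ALLOC_SPACE)) {
   q->data[0] = 0;                  /* CPU-visible: the bo is mapped */
   nvc0_query_allocate(nvc0, q, 0); /* size 0 == free the slice */
}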
static void
space = NVC0_QUERY_ALLOC_SPACE;
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
- q->is64bit = TRUE;
+ q->is64bit = true;
space = 512;
break;
case PIPE_QUERY_SO_STATISTICS:
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
- q->is64bit = TRUE;
+ q->is64bit = true;
space = 64;
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_PRIMITIVES_EMITTED:
- q->is64bit = TRUE;
+ q->is64bit = true;
q->index = index;
space = 32;
break;
struct nvc0_context *nvc0 = nvc0_context(pipe);
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
struct nvc0_query *q = nvc0_query(pq);
- boolean ret = true;
+ bool ret = true;
/* For occlusion queries we have to change the storage, because a previous
- * query might set the initial render conition to FALSE even *after* we re-
- * initialized it to TRUE.
+ * query might set the initial render condition to false even *after* we re-
+ * initialized it to true.
*/
if (q->rotate) {
nvc0_query_rotate(nvc0, q);
* query ?
*/
q->data[0] = q->sequence; /* initialize sequence */
- q->data[1] = 1; /* initial render condition = TRUE */
+ q->data[1] = 1; /* initial render condition = true */
q->data[4] = q->sequence + 1; /* for comparison COND_MODE */
q->data[5] = 0;
}
nvc0_query_get(push, q, 0x00, 0x0d005002 | (q->index << 5));
break;
case PIPE_QUERY_TIMESTAMP_DISJOINT:
- /* This query is not issued on GPU because disjoint is forced to FALSE */
+ /* This query is not issued on GPU because disjoint is forced to false */
q->state = NVC0_QUERY_STATE_READY;
break;
default:
struct nvc0_query *q = nvc0_query(pq);
uint64_t *res64 = (uint64_t*)result;
uint32_t *res32 = (uint32_t*)result;
- boolean *res8 = (boolean*)result;
+ uint8_t *res8 = (uint8_t*)result;
uint64_t *data64 = (uint64_t *)q->data;
unsigned i;
if (q->type >= NVC0_QUERY_DRV_STAT(0) &&
q->type <= NVC0_QUERY_DRV_STAT_LAST) {
res64[0] = q->u.value;
- return TRUE;
+ return true;
} else
#endif
if ((q->type >= NVE4_PM_QUERY(0) && q->type <= NVE4_PM_QUERY_LAST) ||
/* flush for silly apps that spin on GL_QUERY_RESULT_AVAILABLE */
PUSH_KICK(nvc0->base.pushbuf);
}
- return FALSE;
+ return false;
}
if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nvc0->screen->base.client))
- return FALSE;
+ return false;
NOUVEAU_DRV_STAT(&nvc0->screen->base, query_sync_count, 1);
}
q->state = NVC0_QUERY_STATE_READY;
switch (q->type) {
case PIPE_QUERY_GPU_FINISHED:
- res8[0] = TRUE;
+ res8[0] = true;
break;
case PIPE_QUERY_OCCLUSION_COUNTER: /* u32 sequence, u32 count, u64 time */
res64[0] = q->data[1] - q->data[5];
break;
case PIPE_QUERY_TIMESTAMP_DISJOINT:
res64[0] = 1000000000;
- res8[8] = FALSE;
+ res8[8] = false;
break;
case PIPE_QUERY_TIME_ELAPSED:
res64[0] = data64[1] - data64[3];
break;
default:
assert(0); /* can't happen, we don't create queries with invalid type */
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
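The wait flag picks between blocking in nouveau_bo_wait() and returning false so the caller can poll. A hedged usage sketch through the pipe interface (a real caller would do work between polls rather than spin):

uint64_t samples;
while (!pipe->get_query_result(pipe, pq, false, (void *)&samples))
   ; /* result not written yet */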
void
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
struct nvc0_query *q;
uint32_t cond;
- boolean wait =
+ bool wait =
mode != PIPE_RENDER_COND_NO_WAIT &&
mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
cond = condition ? NVC0_3D_COND_MODE_EQUAL :
NVC0_3D_COND_MODE_NOT_EQUAL;
- wait = TRUE;
+ wait = true;
break;
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
void
nvc0_so_target_save_offset(struct pipe_context *pipe,
struct pipe_stream_output_target *ptarg,
- unsigned index, boolean *serialize)
+ unsigned index, bool *serialize)
{
struct nvc0_so_target *targ = nvc0_so_target(ptarg);
if (*serialize) {
- *serialize = FALSE;
+ *serialize = false;
PUSH_SPACE(nvc0_context(pipe)->base.pushbuf, 1);
IMMED_NVC0(nvc0_context(pipe)->base.pushbuf, NVC0_3D(SERIALIZE), 0);
{
struct nvc0_screen *screen = nvc0->screen;
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
- const boolean is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS;
+ const bool is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS;
const struct nvc0_mp_pm_query_cfg *cfg;
unsigned i, c;
unsigned num_ab[2] = { 0, 0 };
PUSH_SPACE(push, 4 * 8 * (is_nve4 ? 1 : 6) + 6);
if (!screen->pm.mp_counters_enabled) {
- screen->pm.mp_counters_enabled = TRUE;
+ screen->pm.mp_counters_enabled = true;
BEGIN_NVC0(push, SUBC_SW(0x06ac), 1);
PUSH_DATA (push, 0x1fcb);
}
struct nvc0_screen *screen = nvc0->screen;
struct pipe_context *pipe = &nvc0->base.pipe;
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
- const boolean is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS;
+ const bool is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS;
uint32_t mask;
uint32_t input[3];
const uint block[3] = { 32, is_nve4 ? 4 : 1, 1 };
if (unlikely(!screen->pm.prog)) {
struct nvc0_program *prog = CALLOC_STRUCT(nvc0_program);
prog->type = PIPE_SHADER_COMPUTE;
- prog->translated = TRUE;
+ prog->translated = true;
prog->num_gprs = 14;
prog->parm_size = 12;
if (is_nve4) {
}
}
-static INLINE boolean
+static INLINE bool
nvc0_mp_pm_query_read_data(uint32_t count[32][4],
- struct nvc0_context *nvc0, boolean wait,
+ struct nvc0_context *nvc0, bool wait,
struct nvc0_query *q,
const struct nvc0_mp_pm_query_cfg *cfg,
unsigned mp_count)
for (c = 0; c < cfg->num_counters; ++c) {
if (q->data[b + 8] != q->sequence) {
if (!wait)
- return FALSE;
+ return false;
if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nvc0->base.client))
- return FALSE;
+ return false;
}
count[p][c] = q->data[b + q->ctr[c]];
}
}
- return TRUE;
+ return true;
}
-static INLINE boolean
+static INLINE bool
nve4_mp_pm_query_read_data(uint32_t count[32][4],
- struct nvc0_context *nvc0, boolean wait,
+ struct nvc0_context *nvc0, bool wait,
struct nvc0_query *q,
const struct nvc0_mp_pm_query_cfg *cfg,
unsigned mp_count)
for (d = 0; d < ((q->ctr[c] & ~3) ? 1 : 4); ++d) {
if (q->data[b + 20 + d] != q->sequence) {
if (!wait)
- return FALSE;
+ return false;
if (nouveau_bo_wait(q->bo, NOUVEAU_BO_RD, nvc0->base.client))
- return FALSE;
+ return false;
}
if (q->ctr[c] & ~0x3)
count[p][c] = q->data[b + 16 + (q->ctr[c] & 3)];
}
}
}
- return TRUE;
+ return true;
}
/* Metric calculations:
unsigned mp_count = MIN2(nvc0->screen->mp_count_compute, 32);
unsigned p, c;
const struct nvc0_mp_pm_query_cfg *cfg;
- boolean ret;
+ bool ret;
cfg = nvc0_mp_pm_query_get_cfg(nvc0, q);
else
ret = nvc0_mp_pm_query_read_data(count, nvc0, wait, q, cfg, mp_count);
if (!ret)
- return FALSE;
+ return false;
if (cfg->op == NVC0_COUNTER_OPn_SUM) {
for (c = 0; c < cfg->num_counters; ++c)
}
*(uint64_t *)result = value;
- return TRUE;
+ return true;
}
int
unsigned bindings)
{
if (sample_count > 8)
- return FALSE;
+ return false;
if (!(0x117 & (1 << sample_count))) /* 0, 1, 2, 4 or 8 */
- return FALSE;
+ return false;
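0x117 is 1_0001_0111 in binary, i.e. bits 0, 1, 2, 4 and 8, so exactly the sample counts in the comment pass the test:

/* 0x117 == (1<<0)|(1<<1)|(1<<2)|(1<<4)|(1<<8) */
assert((0x117 & (1 << 4)) && !(0x117 & (1 << 3))); /* 4 passes, 3 is rejected */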
if (!util_format_is_supported(format, bindings))
- return FALSE;
+ return false;
if ((bindings & PIPE_BIND_SAMPLER_VIEW) && (target != PIPE_BUFFER))
if (util_format_get_blocksizebits(format) == 3 * 32)
- return FALSE;
+ return false;
/* transfers & shared are always supported */
bindings &= ~(PIPE_BIND_TRANSFER_READ |
/* Using COMPUTE has weird effects on 3D state, we need to
* investigate this further before enabling it by default.
*/
- if (debug_get_bool_option("NVC0_COMPUTE", FALSE))
+ if (debug_get_bool_option("NVC0_COMPUTE", false))
return nvc0_screen_compute_setup(screen, screen->base.pushbuf);
return 0;
case 0xe0:
}
}
-boolean
+bool
nvc0_screen_resize_tls_area(struct nvc0_screen *screen,
uint32_t lpos, uint32_t lneg, uint32_t cstack)
{
if (size >= (1 << 20)) {
NOUVEAU_ERR("requested TLS size too large: 0x%"PRIx64"\n", size);
- return FALSE;
+ return false;
}
size *= (screen->base.device->chipset >= 0xe0) ? 64 : 48; /* max warps */
NULL, &bo);
if (ret) {
NOUVEAU_ERR("failed to allocate TLS area, size: 0x%"PRIx64"\n", size);
- return FALSE;
+ return false;
}
nouveau_bo_ref(NULL, &screen->tls);
screen->tls = bo;
- return TRUE;
+ return true;
}
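A worked example of the sizing above, under the assumption that the elided lines handle any further per-thread or per-MP scaling: a hypothetical 4 KiB request stays under the 1 MiB cap and becomes

uint64_t size = 4096;                 /* hypothetical request */
size *= (chipset >= 0xe0) ? 64 : 48;  /* 256 KiB (Kepler) vs 192 KiB */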
#define FAIL_SCREEN_INIT(str, err) \
BEGIN_NVC0(push, NVC0_3D(COND_MODE), 1);
PUSH_DATA (push, NVC0_3D_COND_MODE_ALWAYS);
- if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", TRUE)) {
+ if (debug_get_bool_option("NOUVEAU_SHADER_WATCHDOG", true)) {
/* kill shaders after about 1 second (at 100 MHz) */
BEGIN_NVC0(push, NVC0_3D(WATCHDOG_TIMER), 1);
PUSH_DATA (push, 0x17);
if (!nvc0_blitter_create(screen))
goto fail;
- nouveau_fence_new(&screen->base, &screen->base.fence.current, FALSE);
+ nouveau_fence_new(&screen->base, &screen->base.fence.current, false);
return pscreen;
struct nvc0_blitter;
struct nvc0_graph_state {
- boolean flushed;
- boolean rasterizer_discard;
- boolean early_z_forced;
- boolean prim_restart;
+ bool flushed;
+ bool rasterizer_discard;
+ bool early_z_forced;
+ bool prim_restart;
uint32_t instance_elts; /* bitmask of per-instance elements */
uint32_t instance_base;
uint32_t constant_vbos;
struct nvc0_program *prog; /* compute state object to read MP counters */
struct pipe_query *mp_counter[8]; /* counter to query allocation */
uint8_t num_mp_pm_active[2];
- boolean mp_counters_enabled;
+ bool mp_counters_enabled;
} pm;
struct nouveau_object *eng3d; /* sqrt(1/2)|kepler> + sqrt(1/2)|fermi> */
int nvc0_screen_get_driver_query_group_info(struct pipe_screen *, unsigned,
struct pipe_driver_query_group_info *);
-boolean nvc0_blitter_create(struct nvc0_screen *);
+bool nvc0_blitter_create(struct nvc0_screen *);
void nvc0_blitter_destroy(struct nvc0_screen *);
void nvc0_screen_make_buffers_resident(struct nvc0_screen *);
int nve4_screen_compute_setup(struct nvc0_screen *, struct nouveau_pushbuf *);
int nvc0_screen_compute_setup(struct nvc0_screen *, struct nouveau_pushbuf *);
-boolean nvc0_screen_resize_tls_area(struct nvc0_screen *, uint32_t lpos,
- uint32_t lneg, uint32_t cstack);
+bool nvc0_screen_resize_tls_area(struct nvc0_screen *, uint32_t lpos,
+ uint32_t lneg, uint32_t cstack);
static INLINE void
nvc0_resource_fence(struct nv04_resource *res, uint32_t flags)
}
}
-static INLINE boolean
+static INLINE bool
nvc0_program_validate(struct nvc0_context *nvc0, struct nvc0_program *prog)
{
if (prog->mem)
- return TRUE;
+ return true;
if (!prog->translated) {
prog->translated = nvc0_program_translate(
prog, nvc0->screen->base.device->chipset);
if (!prog->translated)
- return FALSE;
+ return false;
}
if (likely(prog->code_size))
return nvc0_program_upload_code(nvc0, prog);
- return TRUE; /* stream output info only */
+ return true; /* stream output info only */
}
void
/* we allow GPs with no code for specifying stream output state only */
if (gp && gp->code_size) {
- const boolean gp_selects_layer = !!(gp->hdr[13] & (1 << 9));
+ const bool gp_selects_layer = !!(gp->hdr[13] & (1 << 9));
BEGIN_NVC0(push, NVC0_3D(MACRO_GP_SELECT), 1);
PUSH_DATA (push, 0x41);
nvc0_query_pushbuf_submit(push, targ->pq, 0x4);
} else {
PUSH_DATA(push, 0); /* TFB_BUFFER_OFFSET */
- targ->clean = FALSE;
+ targ->clean = false;
}
}
for (; b < 4; ++b)
int r; /* reference */
uint32_t ms;
uint8_t blend_en = 0;
- boolean indep_masks = FALSE;
- boolean indep_funcs = FALSE;
+ bool indep_masks = false;
+ bool indep_funcs = false;
so->pipe = *cso;
cso->rt[i].alpha_func != cso->rt[r].alpha_func ||
cso->rt[i].alpha_src_factor != cso->rt[r].alpha_src_factor ||
cso->rt[i].alpha_dst_factor != cso->rt[r].alpha_dst_factor) {
- indep_funcs = TRUE;
+ indep_funcs = true;
break;
}
}
for (i = 1; i < 8; ++i) {
if (cso->rt[i].colormask != cso->rt[0].colormask) {
- indep_masks = TRUE;
+ indep_masks = true;
break;
}
}
pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, res);
- nvc0->constbuf[s][i].user = (cb && cb->user_buffer) ? TRUE : FALSE;
+ nvc0->constbuf[s][i].user = (cb && cb->user_buffer) ? true : false;
if (nvc0->constbuf[s][i].user) {
nvc0->constbuf[s][i].u.data = cb->user_buffer;
nvc0->constbuf[s][i].size = MIN2(cb->buffer_size, 0x10000);
FREE(targ);
return NULL;
}
- targ->clean = TRUE;
+ targ->clean = true;
targ->pipe.buffer_size = size;
targ->pipe.buffer_offset = offset;
{
struct nvc0_context *nvc0 = nvc0_context(pipe);
unsigned i;
- boolean serialize = TRUE;
+ bool serialize = true;
assert(num_targets <= 4);
for (i = 0; i < num_targets; ++i) {
- const boolean changed = nvc0->tfbbuf[i] != targets[i];
- const boolean append = (offsets[i] == ((unsigned)-1));
+ const bool changed = nvc0->tfbbuf[i] != targets[i];
+ const bool append = (offsets[i] == ((unsigned)-1));
if (!changed && append)
continue;
nvc0->tfbbuf_dirty |= 1 << i;
nvc0_so_target_save_offset(pipe, nvc0->tfbbuf[i], i, &serialize);
if (targets[i] && !append)
- nvc0_so_target(targets[i])->clean = TRUE;
+ nvc0_so_target(targets[i])->clean = true;
pipe_so_target_reference(&nvc0->tfbbuf[i], targets[i]);
}
struct pipe_framebuffer_state *fb = &nvc0->framebuffer;
unsigned i, ms;
unsigned ms_mode = NVC0_3D_MULTISAMPLE_MODE_MS1;
- boolean serialize = FALSE;
+ bool serialize = false;
nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_FB);
}
if (res->status & NOUVEAU_BUFFER_STATUS_GPU_READING)
- serialize = TRUE;
+ serialize = true;
res->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;
ms_mode = mt->ms_mode;
if (mt->base.status & NOUVEAU_BUFFER_STATUS_GPU_READING)
- serialize = TRUE;
+ serialize = true;
mt->base.status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
mt->base.status &= ~NOUVEAU_BUFFER_STATUS_GPU_READING;
nvc0_validate_derived_1(struct nvc0_context *nvc0)
{
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
- boolean rasterizer_discard;
+ bool rasterizer_discard;
if (nvc0->rast && nvc0->rast->pipe.rasterizer_discard) {
- rasterizer_discard = TRUE;
+ rasterizer_discard = true;
} else {
- boolean zs = nvc0->zsa &&
+ bool zs = nvc0->zsa &&
(nvc0->zsa->pipe.depth.enabled || nvc0->zsa->pipe.stencil[0].enabled);
rasterizer_discard = !zs &&
(!nvc0->fragprog || !nvc0->fragprog->hdr[18]);
};
#define validate_list_len (sizeof(validate_list) / sizeof(validate_list[0]))
-boolean
+bool
nvc0_state_validate(struct nvc0_context *nvc0, uint32_t mask, unsigned words)
{
uint32_t state_mask;
}
nvc0->dirty &= ~state_mask;
- nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, FALSE);
+ nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, false);
}
nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx_3d);
ret = nouveau_pushbuf_validate(nvc0->base.pushbuf);
if (unlikely(nvc0->state.flushed)) {
- nvc0->state.flushed = FALSE;
- nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, TRUE);
+ nvc0->state.flushed = false;
+ nvc0_bufctx_fence(nvc0, nvc0->bufctx_3d, true);
}
return !ret;
}
} u;
uint32_t size;
uint32_t offset;
- boolean user; /* should only be TRUE if u.data is valid and non-NULL */
+ bool user; /* should only be true if u.data is valid and non-NULL */
};
struct nvc0_vertex_element {
unsigned num_elements;
uint32_t instance_elts;
uint32_t instance_bufs;
- boolean shared_slots;
- boolean need_conversion; /* e.g. VFETCH cannot convert f64 to f32 */
+ bool shared_slots;
+ bool need_conversion; /* e.g. VFETCH cannot convert f64 to f32 */
unsigned size; /* size of vertex in bytes (when packed) */
struct nvc0_vertex_element element[0];
};
struct pipe_stream_output_target pipe;
struct pipe_query *pq;
unsigned stride;
- boolean clean;
+ bool clean;
};
static INLINE struct nvc0_so_target *
#include "nv50/nv50_blit.h"
static INLINE uint8_t
-nvc0_2d_format(enum pipe_format format, boolean dst, boolean dst_src_equal)
+nvc0_2d_format(enum pipe_format format, bool dst, bool dst_src_equal)
{
uint8_t id = nvc0_format_table[format].rt;
}
static int
-nvc0_2d_texture_set(struct nouveau_pushbuf *push, boolean dst,
+nvc0_2d_texture_set(struct nouveau_pushbuf *push, bool dst,
struct nv50_miptree *mt, unsigned level, unsigned layer,
- enum pipe_format pformat, boolean dst_src_pformat_equal)
+ enum pipe_format pformat, bool dst_src_pformat_equal)
{
struct nouveau_bo *bo = mt->base.bo;
uint32_t width, height, depth;
const enum pipe_format dfmt = dst->base.base.format;
const enum pipe_format sfmt = src->base.base.format;
int ret;
- boolean eqfmt = dfmt == sfmt;
+ bool eqfmt = dfmt == sfmt;
if (!PUSH_SPACE(push, 2 * 16 + 32))
return PIPE_ERROR;
- ret = nvc0_2d_texture_set(push, TRUE, dst, dst_level, dz, dfmt, eqfmt);
+ ret = nvc0_2d_texture_set(push, true, dst, dst_level, dz, dfmt, eqfmt);
if (ret)
return ret;
- ret = nvc0_2d_texture_set(push, FALSE, src, src_level, sz, sfmt, eqfmt);
+ ret = nvc0_2d_texture_set(push, false, src, src_level, sz, sfmt, eqfmt);
if (ret)
return ret;
{
struct nvc0_context *nvc0 = nvc0_context(pipe);
int ret;
- boolean m2mf;
+ bool m2mf;
unsigned dst_layer = dstz, src_layer = src_box->z;
if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
};
blit->vp.type = PIPE_SHADER_VERTEX;
- blit->vp.translated = TRUE;
+ blit->vp.translated = true;
if (blit->screen->base.class_3d >= GM107_3D_CLASS) {
blit->vp.code = (uint32_t *)code_gm107; /* const_cast */
blit->vp.code_size = sizeof(code_gm107);
int i;
uint32_t mode;
uint32_t mask = nv50_blit_eng2d_get_mask(info);
- boolean b;
+ bool b;
mode = nv50_blit_get_filter(info) ?
NV50_2D_BLIT_CONTROL_FILTER_BILINEAR :
{
struct nvc0_context *nvc0 = nvc0_context(pipe);
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
- boolean eng3d = FALSE;
+ bool eng3d = false;
if (util_format_is_depth_or_stencil(info->dst.resource->format)) {
if (!(info->mask & PIPE_MASK_ZS))
return;
if (info->dst.resource->format == PIPE_FORMAT_Z32_FLOAT ||
info->dst.resource->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT)
- eng3d = TRUE;
+ eng3d = true;
if (info->filter != PIPE_TEX_FILTER_NEAREST)
- eng3d = TRUE;
+ eng3d = true;
} else {
if (!(info->mask & PIPE_MASK_RGBA))
return;
if (info->mask != PIPE_MASK_RGBA)
- eng3d = TRUE;
+ eng3d = true;
}
if (nv50_miptree(info->src.resource)->layout_3d) {
- eng3d = TRUE;
+ eng3d = true;
} else
if (info->src.box.depth != info->dst.box.depth) {
- eng3d = TRUE;
+ eng3d = true;
debug_printf("blit: cannot filter array or cube textures in z direction");
}
if (!eng3d && info->dst.format != info->src.format) {
if (!nv50_2d_dst_format_faithful(info->dst.format)) {
- eng3d = TRUE;
+ eng3d = true;
} else
if (!nv50_2d_src_format_faithful(info->src.format)) {
if (!util_format_is_luminance(info->src.format)) {
if (!nv50_2d_dst_format_ops_supported(info->dst.format))
- eng3d = TRUE;
+ eng3d = true;
else
if (util_format_is_intensity(info->src.format))
eng3d = info->src.format != PIPE_FORMAT_I8_UNORM;
}
} else
if (util_format_is_luminance_alpha(info->src.format))
- eng3d = TRUE;
+ eng3d = true;
}
if (info->src.resource->nr_samples == 8 &&
info->dst.resource->nr_samples <= 1)
- eng3d = TRUE;
+ eng3d = true;
#if 0
/* FIXME: can't make this work with eng2d anymore, at least not on nv50 */
if (info->src.resource->nr_samples > 1 ||
info->dst.resource->nr_samples > 1)
- eng3d = TRUE;
+ eng3d = true;
#endif
/* FIXME: find correct src coordinates adjustments */
if ((info->src.box.width != info->dst.box.width &&
info->src.box.width != -info->dst.box.width) ||
(info->src.box.height != info->dst.box.height &&
info->src.box.height != -info->dst.box.height))
- eng3d = TRUE;
+ eng3d = true;
if (nvc0->screen->num_occlusion_queries_active)
IMMED_NVC0(push, NVC0_3D(SAMPLECNT_ENABLE), 0);
{
}
-boolean
+bool
nvc0_blitter_create(struct nvc0_screen *screen)
{
screen->blitter = CALLOC_STRUCT(nvc0_blitter);
if (!screen->blitter) {
NOUVEAU_ERR("failed to allocate blitter struct\n");
- return FALSE;
+ return false;
}
screen->blitter->screen = screen;
nvc0_blitter_make_vp(screen->blitter);
nvc0_blitter_make_sampler(screen->blitter);
- return TRUE;
+ return true;
}
void
FREE(blitter);
}
-boolean
+bool
nvc0_blitctx_create(struct nvc0_context *nvc0)
{
nvc0->blit = CALLOC_STRUCT(nvc0_blitctx);
if (!nvc0->blit) {
NOUVEAU_ERR("failed to allocate blit context\n");
- return FALSE;
+ return false;
}
nvc0->blit->nvc0 = nvc0;
nvc0->blit->rast.pipe.half_pixel_center = 1;
- return TRUE;
+ return true;
}
void
NV50_TIC_0_MAPG__MASK | NV50_TIC_0_MAPR__MASK)
static INLINE uint32_t
-nv50_tic_swizzle(uint32_t tc, unsigned swz, boolean tex_int)
+nv50_tic_swizzle(uint32_t tc, unsigned swz, bool tex_int)
{
switch (swz) {
case PIPE_SWIZZLE_RED:
uint32_t depth;
struct nv50_tic_entry *view;
struct nv50_miptree *mt;
- boolean tex_int;
+ bool tex_int;
view = MALLOC_STRUCT(nv50_tic_entry);
if (!view)
default:
NOUVEAU_ERR("unexpected/invalid texture target: %d\n",
mt->base.base.target);
- return FALSE;
+ return false;
}
tic[3] = (flags & NV50_TEXVIEW_FILTER_MSAA8) ? 0x20000000 : 0x00300000;
return &view->pipe;
}
-static boolean
+static bool
nvc0_validate_tic(struct nvc0_context *nvc0, int s)
{
uint32_t commands[32];
struct nouveau_bo *txc = nvc0->screen->txc;
unsigned i;
unsigned n = 0;
- boolean need_flush = FALSE;
+ bool need_flush = false;
for (i = 0; i < nvc0->num_textures[s]; ++i) {
struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
struct nv04_resource *res;
- const boolean dirty = !!(nvc0->textures_dirty[s] & (1 << i));
+ const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));
if (!tic) {
if (dirty)
BEGIN_NIC0(push, NVC0_M2MF(DATA), 8);
PUSH_DATAp(push, &tic->tic[0], 8);
- need_flush = TRUE;
+ need_flush = true;
} else
if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
return need_flush;
}
-static boolean
+static bool
nve4_validate_tic(struct nvc0_context *nvc0, unsigned s)
{
struct nouveau_bo *txc = nvc0->screen->txc;
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
unsigned i;
- boolean need_flush = FALSE;
+ bool need_flush = false;
for (i = 0; i < nvc0->num_textures[s]; ++i) {
struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
struct nv04_resource *res;
- const boolean dirty = !!(nvc0->textures_dirty[s] & (1 << i));
+ const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));
if (!tic) {
nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
PUSH_DATA (push, 0x1001);
PUSH_DATAp(push, &tic->tic[0], 8);
- need_flush = TRUE;
+ need_flush = true;
} else
if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
void nvc0_validate_textures(struct nvc0_context *nvc0)
{
- boolean need_flush;
+ bool need_flush;
if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) {
need_flush = nve4_validate_tic(nvc0, 0);
}
}
-static boolean
+static bool
nvc0_validate_tsc(struct nvc0_context *nvc0, int s)
{
uint32_t commands[16];
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
unsigned i;
unsigned n = 0;
- boolean need_flush = FALSE;
+ bool need_flush = false;
for (i = 0; i < nvc0->num_samplers[s]; ++i) {
struct nv50_tsc_entry *tsc = nv50_tsc_entry(nvc0->samplers[s][i]);
nvc0_m2mf_push_linear(&nvc0->base, nvc0->screen->txc,
65536 + tsc->id * 32, NV_VRAM_DOMAIN(&nvc0->screen->base),
32, tsc->tsc);
- need_flush = TRUE;
+ need_flush = true;
}
nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);
return need_flush;
}
-boolean
+bool
nve4_validate_tsc(struct nvc0_context *nvc0, int s)
{
struct nouveau_bo *txc = nvc0->screen->txc;
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
unsigned i;
- boolean need_flush = FALSE;
+ bool need_flush = false;
for (i = 0; i < nvc0->num_samplers[s]; ++i) {
struct nv50_tsc_entry *tsc = nv50_tsc_entry(nvc0->samplers[s][i]);
PUSH_DATA (push, 0x1001);
PUSH_DATAp(push, &tsc->tsc[0], 8);
- need_flush = TRUE;
+ need_flush = true;
}
nvc0->screen->tsc.lock[tsc->id / 32] |= 1 << (tsc->id % 32);
void nvc0_validate_samplers(struct nvc0_context *nvc0)
{
- boolean need_flush;
+ bool need_flush;
if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) {
need_flush = nve4_validate_tsc(nvc0, 0);
}
-static INLINE boolean
+static INLINE bool
nvc0_mt_transfer_can_map_directly(struct nv50_miptree *mt)
{
if (mt->base.domain == NOUVEAU_BO_VRAM)
- return FALSE;
+ return false;
if (mt->base.base.usage != PIPE_USAGE_STAGING)
- return FALSE;
+ return false;
return !nouveau_bo_memtype(mt->base.bo);
}
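The three early-outs above condense to a single predicate: only linear (no memtype), non-VRAM, staging-usage miptrees may be mapped directly; everything else goes through a staging copy. A hedged restatement:

return mt->base.domain != NOUVEAU_BO_VRAM &&
       mt->base.base.usage == PIPE_USAGE_STAGING &&
       !nouveau_bo_memtype(mt->base.bo);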
-static INLINE boolean
+static INLINE bool
nvc0_mt_sync(struct nvc0_context *nvc0, struct nv50_miptree *mt, unsigned usage)
{
if (!mt->base.mm) {
so->num_elements = num_elements;
so->instance_elts = 0;
so->instance_bufs = 0;
- so->shared_slots = FALSE;
- so->need_conversion = FALSE;
+ so->shared_slots = false;
+ so->need_conversion = false;
memset(so->vb_access_size, 0, sizeof(so->vb_access_size));
return NULL;
}
so->element[i].state = nvc0_format_table[fmt].vtx;
- so->need_conversion = TRUE;
+ so->need_conversion = true;
}
size = util_format_get_blocksize(fmt);
if (so->instance_elts || src_offset_max >= (1 << 14))
return so;
- so->shared_slots = TRUE;
+ so->shared_slots = true;
for (i = 0; i < num_elements; ++i) {
const unsigned b = elements[i].vertex_buffer_index;
PUSH_DATAh(push, address[b] + ve->src_offset);
PUSH_DATA (push, address[b] + ve->src_offset);
}
- nvc0->base.vbo_dirty = TRUE;
+ nvc0->base.vbo_dirty = true;
}
static void
uint32_t const_vbos;
unsigned i;
uint8_t vbo_mode;
- boolean update_vertex;
+ bool update_vertex;
nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX);
{
struct nvc0_screen *screen = push->user_priv;
- nouveau_fence_update(&screen->base, TRUE);
+ nouveau_fence_update(&screen->base, true);
NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1);
}
}
static void
-nvc0_draw_elements(struct nvc0_context *nvc0, boolean shorten,
+nvc0_draw_elements(struct nvc0_context *nvc0, bool shorten,
unsigned mode, unsigned start, unsigned count,
unsigned instance_count, int32_t index_bias)
{
}
static INLINE void
-nvc0_update_prim_restart(struct nvc0_context *nvc0, boolean en, uint32_t index)
+nvc0_update_prim_restart(struct nvc0_context *nvc0, bool en, uint32_t index)
{
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
continue;
if (res->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
- nvc0->cb_dirty = TRUE;
+ nvc0->cb_dirty = true;
}
}
if (nvc0->cb_dirty) {
IMMED_NVC0(push, NVC0_3D(MEM_BARRIER), 0x1011);
- nvc0->cb_dirty = FALSE;
+ nvc0->cb_dirty = false;
}
if (nvc0->state.vbo_mode) {
if (!nvc0->vtxbuf[i].buffer)
continue;
if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
- nvc0->base.vbo_dirty = TRUE;
+ nvc0->base.vbo_dirty = true;
}
if (!nvc0->base.vbo_dirty && nvc0->idxbuf.buffer &&
nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
- nvc0->base.vbo_dirty = TRUE;
+ nvc0->base.vbo_dirty = true;
nvc0_update_prim_restart(nvc0, info->primitive_restart, info->restart_index);
if (nvc0->base.vbo_dirty) {
if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
- nvc0->base.vbo_dirty = FALSE;
+ nvc0->base.vbo_dirty = false;
}
if (unlikely(info->indirect)) {
nvc0_draw_stream_output(nvc0, info);
} else
if (info->indexed) {
- boolean shorten = info->max_index <= 65535;
+ bool shorten = info->max_index <= 65535;
if (info->primitive_restart && info->restart_index > 65535)
- shorten = FALSE;
+ shorten = false;
nvc0_draw_elements(nvc0, shorten,
info->mode, info->start, info->count,
uint32_t restart_index;
uint32_t instance_id;
- boolean prim_restart;
- boolean need_vertex_id;
+ bool prim_restart;
+ bool need_vertex_id;
struct {
- boolean enabled;
- boolean value;
+ bool enabled;
+ bool value;
unsigned stride;
const uint8_t *data;
} edgeflag;
ctx->need_vertex_id =
nvc0->vertprog->vp.need_vertex_id && (nvc0->vertex->num_elements < 32);
- ctx->edgeflag.value = TRUE;
+ ctx->edgeflag.value = true;
ctx->edgeflag.enabled = nvc0->vertprog->vp.edgeflag < PIPE_MAX_ATTRIBS;
/* silence warnings */
return i;
}
-static INLINE boolean
+static INLINE bool
ef_value(const struct push_context *ctx, uint32_t index)
{
float *pf = (float *)&ctx->edgeflag.data[index * ctx->edgeflag.stride];
- return *pf ? TRUE : FALSE;
+ return *pf ? true : false;
}
-static INLINE boolean
+static INLINE bool
ef_toggle(struct push_context *ctx)
{
ctx->edgeflag.value = !ctx->edgeflag.value;
struct pipe_context *pipe = &nvc0->base.pipe;
struct nvc0_so_target *targ;
targ = nvc0_so_target(info->count_from_stream_output);
- pipe->get_query_result(pipe, targ->pq, TRUE, (void *)&vert_count);
+ pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
vert_count /= targ->stride;
}
ctx.idxbuf = NULL; /* shut up warnings */
static void
nve4_compute_validate_samplers(struct nvc0_context *nvc0)
{
- boolean need_flush = nve4_validate_tsc(nvc0, 5);
+ bool need_flush = nve4_validate_tsc(nvc0, 5);
if (need_flush) {
BEGIN_NVC0(nvc0->base.pushbuf, NVE4_COMPUTE(TSC_FLUSH), 1);
PUSH_DATA (nvc0->base.pushbuf, 0);
}
-static boolean
+static bool
nve4_compute_state_validate(struct nvc0_context *nvc0)
{
if (!nvc0_compute_validate_program(nvc0))
- return FALSE;
+ return false;
if (nvc0->dirty_cp & NVC0_NEW_CP_TEXTURES)
nve4_compute_validate_textures(nvc0);
if (nvc0->dirty_cp & NVC0_NEW_CP_SAMPLERS)
nvc0_validate_global_residents(nvc0,
nvc0->bufctx_cp, NVC0_BIND_CP_GLOBAL);
- nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, FALSE);
+ nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, false);
nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx_cp);
if (unlikely(nouveau_pushbuf_validate(nvc0->base.pushbuf)))
- return FALSE;
+ return false;
if (unlikely(nvc0->state.flushed))
- nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, TRUE);
+ nvc0_bufctx_fence(nvc0, nvc0->bufctx_cp, true);
- return TRUE;
+ return true;
}
for (i = 0; i < nvc0->num_textures[s]; ++i) {
struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
struct nv04_resource *res;
- const boolean dirty = !!(nvc0->textures_dirty[s] & (1 << i));
+ const bool dirty = !!(nvc0->textures_dirty[s] & (1 << i));
if (!tic) {
nvc0->tex_handles[s][i] |= NVE4_TIC_ENTRY_INVALID;
{
const uint32_t *data = (const uint32_t *)desc;
unsigned i;
- boolean zero = FALSE;
+ bool zero = false;
debug_printf("COMPUTE LAUNCH DESCRIPTOR:\n");
for (i = 0; i < sizeof(*desc); i += 4) {
if (data[i / 4]) {
debug_printf("[%x]: 0x%08x\n", i, data[i / 4]);
- zero = FALSE;
+ zero = false;
} else
if (!zero) {
debug_printf("...\n");
- zero = TRUE;
+ zero = true;
}
}
for (i = 0; i < 8; ++i) {
uint64_t address;
uint32_t size = desc->cb[i].size;
- boolean valid = !!(desc->cb_mask & (1 << i));
+ bool valid = !!(desc->cb_mask & (1 << i));
address = ((uint64_t)desc->cb[i].address_h << 32) | desc->cb[i].address_l;
pipe_static_mutex(nouveau_screen_mutex);
-boolean nouveau_drm_screen_unref(struct nouveau_screen *screen)
+bool nouveau_drm_screen_unref(struct nouveau_screen *screen)
{
int ret;
if (screen->refcount == -1)