cs->crelocs++;
radeon_bo_ref(bo);
+
return 0;
}
{
fprintf(stderr, "CS section size missmatch start at (%s,%s,%d) %d vs %d\n",
cs->section_file, cs->section_func, cs->section_line, cs->section_ndw, cs->section_cdw);
+ fprintf(stderr, "cs->section_ndw = %d, cs->cdw = %d, cs->section_cdw = %d \n",
+ cs->section_ndw, cs->cdw, cs->section_cdw);
fprintf(stderr, "CS section end at (%s,%s,%d)\n",
file, func, line);
return -EPIPE;
/* drm_r300_cmd_header_t age; */
uint32_t length_dw_reloc_chunk;
uint64_t ull;
- uint64_t * chunk_ptrs[2];
+ uint64_t chunk_ptrs[2];
uint32_t reloc_chunk[128];
int r;
int retry = 0;
if (r) {
return 0;
}
-
+
/* raw ib chunk */
cs_chunk[0].chunk_id = RADEON_CHUNK_ID_IB;
cs_chunk[0].length_dw = cs->cdw;
- cs_chunk[0].chunk_data = (uint64_t)(cs->packets);
+ cs_chunk[0].chunk_data = (unsigned long)(cs->packets);
/* reloc chunk */
cs_chunk[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
cs_chunk[1].length_dw = length_dw_reloc_chunk;
- cs_chunk[1].chunk_data = (uint64_t)&(reloc_chunk[0]);
+ cs_chunk[1].chunk_data = (unsigned long)&(reloc_chunk[0]);
- chunk_ptrs[0] = (uint64_t * )&(cs_chunk[0]);
- chunk_ptrs[1] = (uint64_t * )&(cs_chunk[1]);
+ chunk_ptrs[0] = (uint64_t)(unsigned long)&(cs_chunk[0]);
+ chunk_ptrs[1] = (uint64_t)(unsigned long)&(cs_chunk[1]);
cs_cmd.num_chunks = 2;
- cs_cmd.cs_id = 0;
- cs_cmd.chunks = (uint64_t)&(chunk_ptrs[0]);
+ /* cs_cmd.cs_id = 0; */
+ cs_cmd.chunks = (uint64_t)(unsigned long)chunk_ptrs;
/* dump_cmdbuf(cs); */
#define R600_OUT_BATCH_REGS(reg, num) \
do { \
if ((reg) >= R600_SET_CONFIG_REG_OFFSET && (reg) < R600_SET_CONFIG_REG_END) { \
- OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, (num))); \
- OUT_BATCH(((reg) - R600_SET_CONFIG_REG_OFFSET) >> 2); \
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, (num))); \
+ R600_OUT_BATCH(((reg) - R600_SET_CONFIG_REG_OFFSET) >> 2); \
} else if ((reg) >= R600_SET_CONTEXT_REG_OFFSET && (reg) < R600_SET_CONTEXT_REG_END) { \
- OUT_BATCH(CP_PACKET3(R600_IT_SET_CONTEXT_REG, (num))); \
- OUT_BATCH(((reg) - R600_SET_CONTEXT_REG_OFFSET) >> 2); \
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONTEXT_REG, (num))); \
+ R600_OUT_BATCH(((reg) - R600_SET_CONTEXT_REG_OFFSET) >> 2); \
} else if ((reg) >= R600_SET_ALU_CONST_OFFSET && (reg) < R600_SET_ALU_CONST_END) { \
- OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (num))); \
- OUT_BATCH(((reg) - R600_SET_ALU_CONST_OFFSET) >> 2); \
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, (num))); \
+ R600_OUT_BATCH(((reg) - R600_SET_ALU_CONST_OFFSET) >> 2); \
} else if ((reg) >= R600_SET_RESOURCE_OFFSET && (reg) < R600_SET_RESOURCE_END) { \
- OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, (num))); \
- OUT_BATCH(((reg) - R600_SET_RESOURCE_OFFSET) >> 2); \
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, (num))); \
+ R600_OUT_BATCH(((reg) - R600_SET_RESOURCE_OFFSET) >> 2); \
} else if ((reg) >= R600_SET_SAMPLER_OFFSET && (reg) < R600_SET_SAMPLER_END) { \
- OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, (num))); \
- OUT_BATCH(((reg) - R600_SET_SAMPLER_OFFSET) >> 2); \
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_SAMPLER, (num))); \
+ R600_OUT_BATCH(((reg) - R600_SET_SAMPLER_OFFSET) >> 2); \
} else if ((reg) >= R600_SET_CTL_CONST_OFFSET && (reg) < R600_SET_CTL_CONST_END) { \
- OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, (num))); \
- OUT_BATCH(((reg) - R600_SET_CTL_CONST_OFFSET) >> 2); \
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, (num))); \
+ R600_OUT_BATCH(((reg) - R600_SET_CTL_CONST_OFFSET) >> 2); \
} else if ((reg) >= R600_SET_LOOP_CONST_OFFSET && (reg) < R600_SET_LOOP_CONST_END) { \
- OUT_BATCH(CP_PACKET3(R600_IT_SET_LOOP_CONST, (num))); \
- OUT_BATCH(((reg) - R600_SET_LOOP_CONST_OFFSET) >> 2); \
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_LOOP_CONST, (num))); \
+ R600_OUT_BATCH(((reg) - R600_SET_LOOP_CONST_OFFSET) >> 2); \
} else if ((reg) >= R600_SET_BOOL_CONST_OFFSET && (reg) < R600_SET_BOOL_CONST_END) { \
- OUT_BATCH(CP_PACKET3(R600_IT_SET_BOOL_CONST, (num))); \
- OUT_BATCH(((reg) - R600_SET_BOOL_CONST_OFFSET) >> 2); \
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_BOOL_CONST, (num))); \
+ R600_OUT_BATCH(((reg) - R600_SET_BOOL_CONST_OFFSET) >> 2); \
} else { \
- OUT_BATCH(CP_PACKET0((reg), (num))); \
+ R600_OUT_BATCH(CP_PACKET0((reg), (num))); \
} \
} while (0)
/** Single register write to command buffer; requires 3 dwords for most things. */
#define R600_OUT_BATCH_REGVAL(reg, val) \
R600_OUT_BATCH_REGS((reg), 1); \
- OUT_BATCH((val))
+ R600_OUT_BATCH((val))
/** Continuous register range write to command buffer; requires 1 dword,
* expects count dwords afterwards for register contents. */
return GL_TRUE;
}
+/* Clean our own things only, radeonDestroyContext will do every thing else. */
+void
+r600DestroyContext (__DRIcontextPrivate * driContextPriv)
+{
+    GET_CURRENT_CONTEXT (ctx);
+    context_t *context = R700_CONTEXT(ctx);
+
+    /* DestroyChipObj now takes the GLcontext (vtable signature is
+     * GLboolean (*DestroyChipObj)(GLcontext * ctx) and r700DestroyChipObj
+     * derives the chip object itself via R700_CONTEXT(ctx)); passing
+     * pvChipObj here would be a type-mismatched argument. */
+    (context->chipobj.DestroyChipObj)(ctx);
+}
+
+
+
void *pvChipObj;
/* ------------ OUT ------------------- */
- GLboolean (*DestroyChipObj)(void* pvChipObj);
+ GLboolean (*DestroyChipObj)(GLcontext * ctx);
void (*InitFuncs)(struct dd_function_table *functions);
GLboolean (*EmitShader)( GLcontext * ctx,
void ** shaderbo,
GLvoid * data,
- int sizeinDWORD);
+ int sizeinDWORD,
+ char * szShaderUsage);
GLboolean (*DeleteShader)(GLcontext * ctx,
void * shaderbo);
void (*FreeDmaRegion)( GLcontext * ctx,
GLboolean r600EmitShader(GLcontext * ctx,
void ** shaderbo,
GLvoid * data,
- int sizeinDWORD)
+ int sizeinDWORD,
+ char * szShaderUsage)
{
radeonContextPtr radeonctx = RADEON_CONTEXT(ctx);
uint32_t *out;
shader_again_alloc:
+#ifdef RADEON_DEBUG_BO
pbo = radeon_bo_open(radeonctx->radeonScreen->bom,
0,
sizeinDWORD * 4,
256,
+ RADEON_GEM_DOMAIN_GTT,
+ 0,
+ szShaderUsage);
+#else
+ pbo = radeon_bo_open(radeonctx->radeonScreen->bom,
+ 0,
+ sizeinDWORD * 4,
+ 256,
RADEON_GEM_DOMAIN_GTT,
0);
+#endif /* RADEON_DEBUG_BO */
if (!pbo)
{
memcpy(out, data, sizeinDWORD * 4);
+ radeon_bo_unmap(pbo);
+
*shaderbo = (void*)pbo;
return GL_TRUE;
extern GLboolean r600EmitShader(GLcontext * ctx,
void ** shaderbo,
GLvoid * data,
- int sizeinDWORD);
+ int sizeinDWORD,
+ char * szShaderUsage);
extern GLboolean r600DeleteShader(GLcontext * ctx,
void * shaderbo);
BEGIN_BATCH(7);
OUT_BATCH_PACKET3(R600_PACKET3_3D_LOAD_VBPNTR, 2);
- OUT_BATCH(1);
- OUT_BATCH(vertex_size | (vertex_size << 8));
+ R600_OUT_BATCH(1);
+ R600_OUT_BATCH(vertex_size | (vertex_size << 8));
OUT_BATCH_RELOC(offset, bo, offset, RADEON_GEM_DOMAIN_GTT, 0, 0);
END_BATCH();
#endif /* to be enabled */
BEGIN_BATCH(3);
OUT_BATCH_PACKET3(R600_PACKET3_3D_DRAW_VBUF_2, 0);
- OUT_BATCH(R600_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (num_verts << 16) | type);
+ R600_OUT_BATCH(R600_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (num_verts << 16) | type);
END_BATCH();
#endif /* to be enabled */
}
extern const struct tnl_pipeline_stage *r700_pipeline[];
-static GLboolean r700DestroyChipObj(void* pvChipObj)
+static GLboolean r700DestroyChipObj(GLcontext * ctx)
{
+ context_t * context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700;
- if(NULL == pvChipObj)
+ if(NULL == context->chipobj.pvChipObj)
{
return GL_TRUE;
}
- r700 = (R700_CHIP_CONTEXT *)pvChipObj;
+ r700 = (R700_CHIP_CONTEXT *)(context->chipobj.pvChipObj);
+
+ if(0 != r700->pbo_vs_clear)
+ {
+ (context->chipobj.DeleteShader)(context, r700->pbo_vs_clear);
+ }
+
+ if(0 != r700->pbo_fs_clear)
+ {
+ (context->chipobj.DeleteShader)(context, r700->pbo_fs_clear);
+ }
FREE(r700->pStateList);
BEGIN_BATCH_NO_AUTOSTATE(9);
- OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
- OUT_BATCH((nStreamID + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
+ R600_OUT_BATCH((nStreamID + SQ_FETCH_RESOURCE_VS_OFFSET) * FETCH_RESOURCE_STRIDE);
- R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD1_0,
+ R600_OUT_BATCH_RELOC(uSQ_VTX_CONSTANT_WORD0_0,
paos->bo,
- uSQ_VTX_CONSTANT_WORD1_0,
+ uSQ_VTX_CONSTANT_WORD0_0,
RADEON_GEM_DOMAIN_GTT, 0, 0, &offset_mod);
- OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
- OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
- OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
- OUT_BATCH(0);
- OUT_BATCH(0);
- OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
+ R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD1_0);
+ R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD2_0);
+ R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD3_0);
+ R600_OUT_BATCH(0);
+ R600_OUT_BATCH(0);
+ R600_OUT_BATCH(uSQ_VTX_CONSTANT_WORD6_0);
END_BATCH();
COMMIT_BATCH();
unsigned int i;
BEGIN_BATCH_NO_AUTOSTATE(6);
- OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
- OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
- OUT_BATCH(0);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
+ R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
+ R600_OUT_BATCH(0);
- OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
- OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
- OUT_BATCH(0);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
+ R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
+ R600_OUT_BATCH(0);
END_BATCH();
COMMIT_BATCH();
return R600_FALLBACK_NONE;
}
+/* Report whether the context register at "reg" (an offset relative to
+ * ASIC_CONTEXT_BASE_INDEX) holds a GPU buffer address that must be emitted
+ * through a relocation: color/depth buffer bases and shader program start
+ * addresses live in buffer objects the kernel may move, so their raw values
+ * cannot be written into the command stream directly.
+ * Returns GL_TRUE for such registers, GL_FALSE for ordinary state registers.
+ * NOTE(review): the "context" parameter is currently unused; presumably kept
+ * for call-site symmetry with setRelocReg -- confirm before removing. */
+inline GLboolean needRelocReg(context_t *context, unsigned int reg)
+{
+    switch (reg + ASIC_CONTEXT_BASE_INDEX)
+    {
+    case mmCB_COLOR0_BASE:
+    case mmCB_COLOR1_BASE:
+    case mmCB_COLOR2_BASE:
+    case mmCB_COLOR3_BASE:
+    case mmCB_COLOR4_BASE:
+    case mmCB_COLOR5_BASE:
+    case mmCB_COLOR6_BASE:
+    case mmCB_COLOR7_BASE:
+    case mmDB_DEPTH_BASE:      /* render target / depth buffer bases */
+    case mmSQ_PGM_START_VS:
+    case mmSQ_PGM_START_FS:
+    case mmSQ_PGM_START_ES:
+    case mmSQ_PGM_START_GS:
+    case mmSQ_PGM_START_PS:    /* shader program start addresses */
+        return GL_TRUE;
+        break;                 /* unreachable; kept for switch style */
+    }
+
+    return GL_FALSE;
+}
+
inline GLboolean setRelocReg(context_t *context, unsigned int reg,
- void * pbo_vs, void * pbo_fs)
+ GLboolean bUseStockShader)
{
BATCH_LOCALS(&context->radeon);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
return GL_FALSE;
}
- offset_mod.shift = RIGHT_SHIFT;
- offset_mod.shiftbits = 8;
- offset_mod.mask = 0x00FFFFFF;
+ /* refer to radeonCreateScreen : screen->fbLocation = (temp & 0xffff) << 16; */
+ offset_mod.shift = NO_SHIFT;
+ offset_mod.shiftbits = 0;
+ offset_mod.mask = 0xFFFFFFFF;
R600_OUT_BATCH_RELOC(r700->CB_COLOR0_BASE.u32All,
rrb->bo,
struct radeon_renderbuffer *rrb;
rrb = radeon_get_depthbuffer(&context->radeon);
- offset_mod.shift = RIGHT_SHIFT;
- offset_mod.shiftbits = 8;
- offset_mod.mask = 0x00FFFFFF;
+ offset_mod.shift = NO_SHIFT;
+ offset_mod.shiftbits = 0;
+ offset_mod.mask = 0xFFFFFFFF;
R600_OUT_BATCH_RELOC(r700->DB_DEPTH_BASE.u32All,
rrb->bo,
break;
case mmSQ_PGM_START_VS:
{
- if(NULL != pbo_vs)
+ if(GL_TRUE == bUseStockShader)
{
- pbo = (struct radeon_bo *)pbo_vs;
+ if(NULL != r700->pbo_vs_clear)
+ {
+ pbo = (struct radeon_bo *)(r700->pbo_vs_clear);
+ }
+ else
+ {
+ return GL_FALSE;
+ }
}
else
{
pbo = (struct radeon_bo *)r700GetActiveVpShaderBo(GL_CONTEXT(context));
}
- offset_mod.shift = RIGHT_SHIFT;
- offset_mod.shiftbits = 8;
- offset_mod.mask = 0x00FFFFFF;
+ offset_mod.shift = NO_SHIFT;
+ offset_mod.shiftbits = 0;
+ offset_mod.mask = 0xFFFFFFFF;
R600_OUT_BATCH_RELOC(r700->SQ_PGM_START_VS.u32All,
pbo,
case mmSQ_PGM_START_GS:
case mmSQ_PGM_START_PS:
{
- if(NULL != pbo_fs)
+ if(GL_TRUE == bUseStockShader)
{
- pbo = (struct radeon_bo *)pbo_fs;
+ if(NULL != r700->pbo_fs_clear)
+ {
+ pbo = (struct radeon_bo *)(r700->pbo_fs_clear);
+ }
+ else
+ {
+ return GL_FALSE;
+ }
}
else
{
pbo = (struct radeon_bo *)r700GetActiveFpShaderBo(GL_CONTEXT(context));
}
- offset_mod.shift = RIGHT_SHIFT;
- offset_mod.shiftbits = 8;
- offset_mod.mask = 0x00FFFFFF;
+ offset_mod.shift = NO_SHIFT;
+ offset_mod.shiftbits = 0;
+ offset_mod.mask = 0xFFFFFFFF;
voffset = 0;
R600_OUT_BATCH_RELOC(r700->SQ_PGM_START_PS.u32All,
return GL_FALSE;
}
-GLboolean r700SendContextStates(context_t *context, void * pbo_vs, void * pbo_fs)
+GLboolean r700SendContextStates(context_t *context, GLboolean bUseStockShader)
{
BATCH_LOCALS(&context->radeon);
pInit = pState;
- while(NULL != pState->pNext)
+ if(GL_FALSE == needRelocReg(context, pState->unOffset))
{
- if( (pState->pNext->unOffset - pState->unOffset) > 1 )
+ while(NULL != pState->pNext)
{
- break;
- }
- else
- {
- pState = pState->pNext;
- toSend++;
- }
- };
+ if( ((pState->pNext->unOffset - pState->unOffset) > 1)
+ || (GL_TRUE == needRelocReg(context, pState->pNext->unOffset)) )
+ {
+ break;
+ }
+ else
+ {
+ pState = pState->pNext;
+ toSend++;
+ }
+ };
+ }
pState = pState->pNext;
R600_OUT_BATCH_REGSEQ(((pInit->unOffset + ASIC_CONTEXT_BASE_INDEX)<<2), toSend);
for(ui=0; ui<toSend; ui++)
{
- if( GL_FALSE == setRelocReg(context, (pInit->unOffset+ui), pbo_vs, pbo_fs) )
+ if( GL_FALSE == setRelocReg(context, (pInit->unOffset+ui), bUseStockShader) )
{
/* for not reloc reg. */
- OUT_BATCH(*(pInit->puiValue));
+ R600_OUT_BATCH(*(pInit->puiValue));
}
pInit = pInit->pNext;
};
R700_TEXTURE_STATES texture_states;
+ void * pbo_vs_clear;
+ void * pbo_fs_clear;
GLboolean bEnablePerspective;
} R700_CHIP_CONTEXT;
#define R700_CONTEXT_STATES(context) ((R700_CHIP_CONTEXT *)(context->chipobj.pvChipObj))
extern GLboolean r700InitChipObject(context_t *context);
-extern GLboolean r700SendContextStates(context_t *context, void * pbo_vs, void * pbo_fs);
+extern GLboolean r700SendContextStates(context_t *context, GLboolean bUseStockShader);
extern int r700SetupStreams(GLcontext * ctx);
extern void r700SetupVTXConstans(GLcontext * ctx,
unsigned int nStreamID,
/* Setup vb */
BEGIN_BATCH_NO_AUTOSTATE(6);
- OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
- OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
- OUT_BATCH(0);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
+ R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
+ R600_OUT_BATCH(0);
- OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
- OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
- OUT_BATCH(0);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
+ R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
+ R600_OUT_BATCH(0);
END_BATCH();
COMMIT_BATCH();
(context->chipobj.EmitVec)(ctx, &aos_vb, (GLvoid *)fVb, 4, 16, 6);
+
r700SetupVTXConstans(ctx, VERT_ATTRIB_POS, &aos_vb, 4, 16, 6);
/* Setup shaders, copied from dump */
SETbit(r700->SQ_PGM_RESOURCES_PS.u32All, PGM_RESOURCES__PRIME_CACHE_ON_DRAW_bit);
SETbit(r700->SQ_PGM_RESOURCES_VS.u32All, PGM_RESOURCES__PRIME_CACHE_ON_DRAW_bit);
/* vs */
- (context->chipobj.EmitShader)(ctx, &pbo_vs, (GLvoid *)(&uVs[0]), 28);
+ if(0 == r700->pbo_vs_clear)
+ {
+ (context->chipobj.EmitShader)(ctx, &(r700->pbo_vs_clear), (GLvoid *)(&uVs[0]), 28, "Clr VS");
+ }
+
r700->SQ_PGM_START_VS.u32All = 0;
r700->SQ_PGM_RESOURCES_VS.u32All = 0x00800004;
/* vs const */ /* TODO : Set color here */
BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
- OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, 4));
- OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
- OUT_BATCH(*((unsigned int*)&(ctx->Color.ClearColor[0])));
- OUT_BATCH(*((unsigned int*)&(ctx->Color.ClearColor[1])));
- OUT_BATCH(*((unsigned int*)&(ctx->Color.ClearColor[2])));
- OUT_BATCH(*((unsigned int*)&(ctx->Color.ClearColor[3])));
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, 4));
+ R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
+ R600_OUT_BATCH(*((unsigned int*)&(ctx->Color.ClearColor[0])));
+ R600_OUT_BATCH(*((unsigned int*)&(ctx->Color.ClearColor[1])));
+ R600_OUT_BATCH(*((unsigned int*)&(ctx->Color.ClearColor[2])));
+ R600_OUT_BATCH(*((unsigned int*)&(ctx->Color.ClearColor[3])));
END_BATCH();
COMMIT_BATCH();
r700->SPI_VS_OUT_CONFIG.u32All = 0x00000000;
r700->SPI_PS_IN_CONTROL_0.u32All = 0x20000001;
/* ps */
- (context->chipobj.EmitShader)(ctx, &pbo_fs, (GLvoid *)(&uFs[0]), 12);
+ if(0 == r700->pbo_fs_clear)
+ {
+ (context->chipobj.EmitShader)(ctx, &(r700->pbo_fs_clear), (GLvoid *)(&uFs[0]), 12, "Clr PS");
+ }
+
r700->SQ_PGM_START_PS.u32All = 0;
r700->SQ_PGM_RESOURCES_PS.u32All = 0x00800002;
r700->SQ_PGM_EXPORTS_PS.u32All = 0x00000002;
r700->SQ_PGM_START_GS.u32All = 0;
/* Now, send the states */
- r700SendContextStates(context, pbo_vs, pbo_fs);
+ r700SendContextStates(context, GL_TRUE);
/* Draw */
GLuint numEntires, j;
SETfield(VGT_INDEX_TYPE, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
- OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
- OUT_BATCH(VGT_INDEX_TYPE);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
+ R600_OUT_BATCH(VGT_INDEX_TYPE);
VGT_NUM_INDICES = numIndices;
SETfield(VGT_PRIMITIVE_TYPE, DI_PT_TRILIST, VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);
- OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
- OUT_BATCH(mmVGT_PRIMITIVE_TYPE - ASIC_CONFIG_BASE_INDEX);
- OUT_BATCH(VGT_PRIMITIVE_TYPE);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+ R600_OUT_BATCH(mmVGT_PRIMITIVE_TYPE - ASIC_CONFIG_BASE_INDEX);
+ R600_OUT_BATCH(VGT_PRIMITIVE_TYPE);
SETfield(VGT_DRAW_INITIATOR, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
SETfield(VGT_DRAW_INITIATOR, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);
- OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (numIndices + 1)));
- OUT_BATCH(VGT_NUM_INDICES);
- OUT_BATCH(VGT_DRAW_INITIATOR);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (numIndices + 1)));
+ R600_OUT_BATCH(VGT_NUM_INDICES);
+ R600_OUT_BATCH(VGT_DRAW_INITIATOR);
for (j=0; j<numIndices; j++)
{
- OUT_BATCH(j);
+ R600_OUT_BATCH(j);
}
END_BATCH();
COMMIT_BATCH();
(context->chipobj.FlushCmdBuffer)(context);
- /* TODO : keep these in context, don't load and release every time. */
- (context->chipobj.DeleteShader)(context, &pbo_vs);
-
- (context->chipobj.DeleteShader)(context, &pbo_fs);
-
(context->chipobj.FreeDmaRegion)(context, aos_vb.bo);
/* Restore chip object. */
(context->chipobj.EmitShader)(ctx,
&(fp->shaderbo),
(GLvoid *)(fp->r700Shader.pProgram),
- fp->r700Shader.uShaderBinaryDWORDSize);
+ fp->r700Shader.uShaderBinaryDWORDSize,
+ "FS");
fp->loaded = GL_TRUE;
}
BEGIN_BATCH_NO_AUTOSTATE(2 + unNumParamData);
- OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, unNumParamData));
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, unNumParamData));
/* assembler map const from very beginning. */
- OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
+ R600_OUT_BATCH(SQ_ALU_CONSTANT_PS_OFFSET * 4);
unNumParamData = paramList->NumParameters;
for(ui=0; ui<unNumParamData; ui++)
{
- OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][0])));
- OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][1])));
- OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][2])));
- OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][3])));
+ R600_OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][0])));
+ R600_OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][1])));
+ R600_OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][2])));
+ R600_OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][3])));
}
END_BATCH();
COMMIT_BATCH();
if (radeon->dma.flush)
radeon->dma.flush( ctx );
- r700SendContextStates(context, NULL, NULL);
+ r700SendContextStates(context, GL_FALSE);
if (radeon->cmdbuf.cs->cdw)
rcommonFlushCmdBuf(radeon, __FUNCTION__);
BATCH_LOCALS(&context->radeon);
BEGIN_BATCH_NO_AUTOSTATE(3);
- OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
- OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
- OUT_BATCH(1 << 15);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+ R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
+ R600_OUT_BATCH(1 << 15);
END_BATCH();
COMMIT_BATCH();
BATCH_LOCALS(&context->radeon);
BEGIN_BATCH_NO_AUTOSTATE(5);
- OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
- OUT_BATCH(0x16);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
+ R600_OUT_BATCH(0x16);
- OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
- OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
- OUT_BATCH(1 << 17);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+ R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
+ R600_OUT_BATCH(1 << 17);
END_BATCH();
COMMIT_BATCH();
if (context->radeon.radeonScreen->chip_family <= CHIP_FAMILY_RV670)
{
BEGIN_BATCH_NO_AUTOSTATE(2);
- OUT_BATCH(CP_PACKET3(R600_IT_START_3D_CMDBUF, 1));
- OUT_BATCH(0);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_START_3D_CMDBUF, 1));
+ R600_OUT_BATCH(0);
END_BATCH();
}
BEGIN_BATCH_NO_AUTOSTATE(3);
- OUT_BATCH(CP_PACKET3(R600_IT_CONTEXT_CONTROL, 1));
- OUT_BATCH(0x80000000);
- OUT_BATCH(0x80000000);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_CONTEXT_CONTROL, 1));
+ R600_OUT_BATCH(0x80000000);
+ R600_OUT_BATCH(0x80000000);
END_BATCH();
COMMIT_BATCH();
BEGIN_BATCH_NO_AUTOSTATE(5);
- OUT_BATCH(CP_PACKET3((IT_SURFACE_SYNC << 8), 3)));
- OUT_BATCH(CP_COHER_CNTL);
- OUT_BATCH(0xFFFFFFFF);
- OUT_BATCH(0x00000000);
- OUT_BATCH(10);
+ R600_OUT_BATCH(CP_PACKET3((IT_SURFACE_SYNC << 8), 3)));
+ R600_OUT_BATCH(CP_COHER_CNTL);
+ R600_OUT_BATCH(0xFFFFFFFF);
+ R600_OUT_BATCH(0x00000000);
+ R600_OUT_BATCH(10);
END_BATCH();
COMMIT_BATCH();
{
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(context->chipobj.pvChipObj);
-
+#if 1
BATCH_LOCALS(&context->radeon);
unsigned int i, j;
r700->SQ_PGM_START_ES.u32All = r700->SQ_PGM_START_PS.u32All;
r700->SQ_PGM_START_GS.u32All = r700->SQ_PGM_START_PS.u32All;
- r700SendContextStates(context, NULL, NULL);
+ r700SendContextStates(context, GL_FALSE);
/* richard test code */
for (i = 0; i < vb->PrimitiveCount; i++)
numEntires = 2 /* VGT_INDEX_TYPE */
+ 3 /* VGT_PRIMITIVE_TYPE */
- + numIndices + 3 /* DRAW_INDEX_IMMD */
- + 2; /* test stamp */
+ + numIndices + 3; /* DRAW_INDEX_IMMD */
BEGIN_BATCH_NO_AUTOSTATE(numEntires);
VGT_INDEX_TYPE |= DI_INDEX_SIZE_32_BIT << INDEX_TYPE_shift;
- OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
- OUT_BATCH(VGT_INDEX_TYPE);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
+ R600_OUT_BATCH(VGT_INDEX_TYPE);
VGT_NUM_INDICES = numIndices;
VGT_PRIMITIVE_TYPE |= r700PrimitiveType(prim) << VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift;
- OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
- OUT_BATCH(mmVGT_PRIMITIVE_TYPE - ASIC_CONFIG_BASE_INDEX);
- OUT_BATCH(VGT_PRIMITIVE_TYPE);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+ R600_OUT_BATCH(mmVGT_PRIMITIVE_TYPE - ASIC_CONFIG_BASE_INDEX);
+ R600_OUT_BATCH(VGT_PRIMITIVE_TYPE);
VGT_DRAW_INITIATOR |= DI_SRC_SEL_IMMEDIATE << SOURCE_SELECT_shift;
VGT_DRAW_INITIATOR |= DI_MAJOR_MODE_0 << MAJOR_MODE_shift;
- OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (numIndices + 1)));
- OUT_BATCH(VGT_NUM_INDICES);
- OUT_BATCH(VGT_DRAW_INITIATOR);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (numIndices + 1)));
+ R600_OUT_BATCH(VGT_NUM_INDICES);
+ R600_OUT_BATCH(VGT_DRAW_INITIATOR);
for (j=0; j<numIndices; j++)
{
- OUT_BATCH(j);
+ R600_OUT_BATCH(j);
}
END_BATCH();
COMMIT_BATCH();
/* Flush render op cached for last several quads. */
BEGIN_BATCH_NO_AUTOSTATE(2);
- OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
- OUT_BATCH(CACHE_FLUSH_AND_INV_EVENT);
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
+ R600_OUT_BATCH(CACHE_FLUSH_AND_INV_EVENT);
END_BATCH();
COMMIT_BATCH();
R600_OUT_BATCH_REGVAL((0x2144 << 2), 0x56785678);
END_BATCH();
COMMIT_BATCH();
-
+#endif /* #if 1 */
rcommonFlushCmdBuf( &context->radeon, __FUNCTION__ );
return GL_FALSE;
nPitchInPixel = rrb->pitch/rrb->cpp;
SETfield(r700->CB_COLOR0_SIZE.u32All, (nPitchInPixel/8)-1,
PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
- SETfield(r700->CB_COLOR0_SIZE.u32All, ( (nPitchInPixel * rrb->base.Height)/64 )-1,
+ SETfield(r700->CB_COLOR0_SIZE.u32All, ( (nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight)/64 )-1,
SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask);
r700->CB_COLOR0_BASE.u32All = 0;
SETfield(r700->CB_COLOR0_INFO.u32All, ENDIAN_NONE, ENDIAN_shift, ENDIAN_mask);
SETfield(r700->DB_DEPTH_SIZE.u32All, (nPitchInPixel/8)-1,
PITCH_TILE_MAX_shift, PITCH_TILE_MAX_mask);
- SETfield(r700->DB_DEPTH_SIZE.u32All, ( (nPitchInPixel * rrb->base.Height)/64 )-1,
+ SETfield(r700->DB_DEPTH_SIZE.u32All, ( (nPitchInPixel * context->radeon.radeonScreen->driScreen->fbHeight)/64 )-1,
SLICE_TILE_MAX_shift, SLICE_TILE_MAX_mask); /* size in pixel / 64 - 1 */
if(4 == rrb->cpp)
r700->CB_COLOR0_FRAG.u32All = 0;
r700->CB_COLOR0_MASK.u32All = 0;
- r700->PA_SC_VPORT_ZMAX_0.u32All = 0x3F800000;
+ r700->PA_SC_VPORT_ZMAX_0.u32All = 0x3F800000;
}
void r700InitStateFuncs(struct dd_function_table *functions) //-----------------
(context->chipobj.EmitShader)(ctx,
&(vp->shaderbo),
(GLvoid *)(vp->r700Shader.pProgram),
- vp->r700Shader.uShaderBinaryDWORDSize);
+ vp->r700Shader.uShaderBinaryDWORDSize,
+ "VS");
vp->loaded = GL_TRUE;
}
BEGIN_BATCH_NO_AUTOSTATE(unNumParamData + 2);
- OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, unNumParamData));
+ R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_ALU_CONST, unNumParamData));
/* assembler map const from very beginning. */
- OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
+ R600_OUT_BATCH(SQ_ALU_CONSTANT_VS_OFFSET * 4);
unNumParamData = paramList->NumParameters;
for(ui=0; ui<unNumParamData; ui++)
{
- OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][0])));
- OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][1])));
- OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][2])));
- OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][3])));
+ R600_OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][0])));
+ R600_OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][1])));
+ R600_OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][2])));
+ R600_OUT_BATCH(*((unsigned int*)&(paramList->ParameterValues[ui][3])));
}
END_BATCH();
COMMIT_BATCH();
radeonContextPtr radeon = (radeonContextPtr) driContextPriv->driverPrivate;
radeonContextPtr current = ctx ? RADEON_CONTEXT(ctx) : NULL;
+ /* +r6/r7 */
+ __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
+ radeonScreenPtr screen = (radeonScreenPtr) (sPriv->private);
+ /* --------- */
+
if (radeon == current) {
radeon_firevertices(radeon);
_mesa_make_current(NULL, NULL, NULL);
assert(radeon);
if (radeon) {
+#if RADEON_COMMON && defined(RADEON_COMMON_FOR_R600) /* +r6/r7 */
+ if (IS_R600_CLASS(screen))
+ {
+ r600DestroyContext(driContextPriv);
+ }
+#endif
+
if (radeon->dma.current) {
rcommonFlushCmdBuf( radeon, __FUNCTION__ );
}
screen->AGPMode = dri_priv->AGPMode;
ret = radeonGetParam(sPriv, RADEON_PARAM_FB_LOCATION, &temp);
- if (ret) {
- if (screen->chip_family < CHIP_FAMILY_RS600 && !screen->kernel_mm)
- screen->fbLocation = ( INREG( RADEON_MC_FB_LOCATION ) & 0xffff) << 16;
- else {
- FREE( screen );
- fprintf(stderr, "Unable to get fb location need newer drm\n");
- return NULL;
+
+ /* +r6/r7 */
+ if(screen->chip_family >= CHIP_FAMILY_R600)
+ {
+ if (ret)
+ {
+ FREE( screen );
+ fprintf(stderr, "Unable to get fb location need newer drm\n");
+ return NULL;
}
- } else {
- screen->fbLocation = (temp & 0xffff) << 16;
+ else
+ {
+ screen->fbLocation = (temp & 0xffff) << 24;
+ }
+ }
+ else
+ {
+ if (ret)
+ {
+ if (screen->chip_family < CHIP_FAMILY_RS600 && !screen->kernel_mm)
+ screen->fbLocation = ( INREG( RADEON_MC_FB_LOCATION ) & 0xffff) << 16;
+ else
+ {
+ FREE( screen );
+ fprintf(stderr, "Unable to get fb location need newer drm\n");
+ return NULL;
+ }
+ }
+ else
+ {
+ screen->fbLocation = (temp & 0xffff) << 16;
+ }
}
if (IS_R300_CLASS(screen)) {