LLMemType mt2(LLMemType::MTYPE_VERTEX_UPDATE_INDICES);
mRequestedNumIndices = nindices;
if (!mDynamicSize)
{
mNumIndices = nindices;
}
else if (mUsage == GL_STATIC_DRAW_ARB ||
nindices > mNumIndices ||
nindices < mNumIndices/2)
{
if (mUsage != GL_STATIC_DRAW_ARB)
{
nindices += nindices/4;
}
mNumIndices = nindices;
}
}
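//allocate vertex and index storage; sanity-checks the requested counts,
//updates the cached sizes, and (optionally) creates the GL buffer objects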
void LLVertexBuffer::allocateBuffer(S32 nverts, S32 nindices, bool create)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_ALLOCATE_BUFFER);
if (nverts < 0 || nindices < 0 ||
nverts > 65536)
{
llerrs << "Bad vertex buffer allocation: " << nverts << " : " << nindices << llendl;
}
updateNumVerts(nverts);
updateNumIndices(nindices);
if (mMappedData)
{
llerrs << "LLVertexBuffer::allocateBuffer() called redundantly." << llendl;
}
if (create && (nverts || nindices))
{
createGLBuffer();
createGLIndices();
}
sAllocatedBytes += getSize() + getIndicesSize();
}
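//resize an existing buffer; static buffers are destroyed and reallocated,
//dynamic buffers only re-specify GL storage when they grow or shrink below
//half their current size (mResized defers the actual glBufferData to setBuffer)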
void LLVertexBuffer::resizeBuffer(S32 newnverts, S32 newnindices)
{
mRequestedNumVerts = newnverts;
mRequestedNumIndices = newnindices;
LLMemType mt2(LLMemType::MTYPE_VERTEX_RESIZE_BUFFER);
mDynamicSize = TRUE;
if (mUsage == GL_STATIC_DRAW_ARB)
{ //always delete/allocate static buffers on resize
destroyGLBuffer();
destroyGLIndices();
allocateBuffer(newnverts, newnindices, TRUE);
mFinal = FALSE;
}
else if (newnverts > mNumVerts || newnindices > mNumIndices ||
newnverts < mNumVerts/2 || newnindices < mNumIndices/2)
{
sAllocatedBytes -= getSize() + getIndicesSize();
updateNumVerts(newnverts);
updateNumIndices(newnindices);
S32 newsize = getSize();
S32 new_index_size = getIndicesSize();
sAllocatedBytes += newsize + new_index_size;
if (newsize)
{
if (!mGLBuffer)
{ //no buffer exists, create a new one
createGLBuffer();
}
else
{
if (!useVBOs())
{
mMappedData = (U8*)ALLOCATE_MEM(sPrivatePoolp, newsize);
}
mResized = TRUE;
}
}
else if (mGLBuffer)
{
destroyGLBuffer();
}
if (new_index_size)
{
if (!mGLIndices)
{
createGLIndices();
}
else
{
if (!useVBOs())
{
FREE_MEM(sPrivatePoolp, mMappedIndexData) ;
mMappedIndexData = (U8*)ALLOCATE_MEM(sPrivatePoolp, new_index_size);
}
mResized = TRUE;
}
}
else if (mGLIndices)
{
destroyGLIndices();
}
}
if (mResized && useVBOs())
{
freeClientBuffer() ;
setBuffer(0);
}
}
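//a buffer uses VBOs only when it was created with a non-zero GL usage hint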
BOOL LLVertexBuffer::useVBOs() const
{
//it's generally ineffective to use VBO for things that are streaming on apple
if (!mUsage)
{
return FALSE;
}
return TRUE;
}
//----------------------------------------------------------------------------
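//release the client-memory copies of the vertex/index data; these copies are
//only kept when VBO mapping is disabled, in which case updates are pushed to
//GL with glBufferSubDataARB instead of mapping the buffer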
void LLVertexBuffer::freeClientBuffer()
{
if(useVBOs() && sDisableVBOMapping && (mMappedData || mMappedIndexData))
{
FREE_MEM(sPrivatePoolp, mMappedData) ;
FREE_MEM(sPrivatePoolp, mMappedIndexData) ;
mMappedData = NULL ;
mMappedIndexData = NULL ;
}
}
void LLVertexBuffer::allocateClientVertexBuffer()
{
if(!mMappedData)
{
mMappedData = (U8*)ALLOCATE_MEM(sPrivatePoolp, getSize());
}
}
void LLVertexBuffer::allocateClientIndexBuffer()
{
if(!mMappedIndexData)
{
mMappedIndexData = (U8*)ALLOCATE_MEM(sPrivatePoolp, getIndicesSize());
}
}
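//grow an existing mapped region to also cover [index, index+count);
//returns false if the spans are disjoint so the caller records a new region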
bool expand_region(LLVertexBuffer::MappedRegion& region, S32 index, S32 count)
{
S32 end = index+count;
S32 region_end = region.mIndex+region.mCount;
if (end < region.mIndex ||
index > region_end)
{ //gap exists, do not merge
return false;
}
S32 new_end = llmax(end, region_end);
S32 new_index = llmin(index, region.mIndex);
region.mIndex = new_index;
region.mCount = new_end-new_index;
return true;
}
// Map for data access
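// Returns a write pointer for 'count' vertices of the given attribute type
// starting at 'index' (count == -1 maps through the end of the buffer).
// The touched span is recorded in mMappedVertexRegions so unmapBuffer() can
// flush just that range; the backing store is a glMapBufferRange/glMapBufferARB
// mapping, or a client-memory copy when VBO mapping is disabled.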
U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_range)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
if (mFinal)
{
llerrs << "LLVertexBuffer::mapVertexBuffer() called on a finalized buffer." << llendl;
}
if (!useVBOs() && !mMappedData && !mMappedIndexData)
{
llerrs << "LLVertexBuffer::mapVertexBuffer() called on unallocated buffer." << llendl;
}
if (useVBOs())
{
if (sDisableVBOMapping || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (count == -1)
{
count = mNumVerts-index;
}
bool mapped = false;
//see if range is already mapped
for (U32 i = 0; i < mMappedVertexRegions.size(); ++i)
{
MappedRegion& region = mMappedVertexRegions[i];
if (region.mType == type)
{
if (expand_region(region, index, count))
{
mapped = true;
break;
}
}
}
if (!mapped)
{
//not already mapped, map new region
MappedRegion region(type, !sDisableVBOMapping && map_range ? -1 : index, count);
mMappedVertexRegions.push_back(region);
}
}
if (mVertexLocked && map_range)
{
llerrs << "Attempted to map a specific range of a buffer that was already mapped." << llendl;
}
if (!mVertexLocked)
{
LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_VERTICES);
setBuffer(0, type);
mVertexLocked = TRUE;
sMappedCount++;
stop_glerror();
if(sDisableVBOMapping)
{
map_range = false;
allocateClientVertexBuffer() ;
}
else
{
U8* src = NULL;
waitFence();
if (gGLManager.mHasMapBufferRange)
{
if (map_range)
{
S32 offset = mOffsets[type] + sTypeSize[type]*index;
S32 length = (sTypeSize[type]*count+0xF) & ~0xF;
#ifdef GL_ARB_map_buffer_range
src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, offset, length,
GL_MAP_WRITE_BIT |
GL_MAP_FLUSH_EXPLICIT_BIT |
GL_MAP_INVALIDATE_RANGE_BIT);
#endif
}
else
{
#ifdef GL_ARB_map_buffer_range
src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, 0, mSize,
GL_MAP_WRITE_BIT |
GL_MAP_FLUSH_EXPLICIT_BIT);
#endif
}
}
else if (gGLManager.mHasFlushBufferRange)
{
if (map_range)
{
glBufferParameteriAPPLE(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SERIALIZED_MODIFY_APPLE, GL_FALSE);
glBufferParameteriAPPLE(GL_ARRAY_BUFFER_ARB, GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE);
src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
else
{
src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
}
else
{
map_range = false;
src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
mMappedData = LL_NEXT_ALIGNED_ADDRESS<U8>(src);
mAlignedOffset = mMappedData - src;
stop_glerror();
}
}
if (!mMappedData)
{
log_glerror();
//check the availability of memory
LLMemory::logMemoryInfo(TRUE) ;
if(!sDisableVBOMapping)
{
//--------------------
//print out more debug info before crash
llinfos << "vertex buffer size: (num verts : num indices) = " << getNumVerts() << " : " << getNumIndices() << llendl ;
GLint size ;
glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size) ;
llinfos << "GL_ARRAY_BUFFER_ARB size is " << size << llendl ;
//--------------------
GLint buff;
glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
if ((GLuint)buff != mGLBuffer)
{
llerrs << "Invalid GL vertex buffer bound: " << buff << llendl;
}
llerrs << "glMapBuffer returned NULL (no vertex data)" << llendl;
}
else
{
llerrs << "memory allocation for vertex data failed." << llendl ;
}
}
}
else
{
map_range = false;
}
if (map_range && gGLManager.mHasMapBufferRange && !sDisableVBOMapping)
{
return mMappedData;
}
else
{
return mMappedData+mOffsets[type]+sTypeSize[type]*index;
}
}
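// Index-buffer analogue of mapVertexBuffer(): returns a write pointer for
// 'count' 16-bit indices starting at 'index' and records the span in
// mMappedIndexRegions.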
U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
if (mFinal)
{
llerrs << "LLVertexBuffer::mapIndexBuffer() called on a finalized buffer." << llendl;
}
if (!useVBOs() && !mMappedData && !mMappedIndexData)
{
llerrs << "LLVertexBuffer::mapIndexBuffer() called on unallocated buffer." << llendl;
}
if (useVBOs())
{
if (sDisableVBOMapping || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (count == -1)
{
count = mNumIndices-index;
}
bool mapped = false;
//see if range is already mapped
for (U32 i = 0; i < mMappedIndexRegions.size(); ++i)
{
MappedRegion& region = mMappedIndexRegions[i];
if (expand_region(region, index, count))
{
mapped = true;
break;
}
}
if (!mapped)
{
//not already mapped, map new region
MappedRegion region(TYPE_INDEX, !sDisableVBOMapping && map_range ? -1 : index, count);
mMappedIndexRegions.push_back(region);
}
}
if (mIndexLocked && map_range)
{
llerrs << "Attempted to map a specific range of a buffer that was already mapped." << llendl;
}
if (!mIndexLocked)
{
LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_INDICES);
setBuffer(0, TYPE_INDEX);
mIndexLocked = TRUE;
sMappedCount++;
stop_glerror();
if(sDisableVBOMapping)
{
map_range = false;
allocateClientIndexBuffer() ;
}
else
{
U8* src = NULL;
waitFence();
if (gGLManager.mHasMapBufferRange)
{
if (map_range)
{
S32 offset = sizeof(U16)*index;
S32 length = sizeof(U16)*count;
#ifdef GL_ARB_map_buffer_range
src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length,
GL_MAP_WRITE_BIT |
GL_MAP_FLUSH_EXPLICIT_BIT |
GL_MAP_INVALIDATE_RANGE_BIT);
#endif
}
else
{
#ifdef GL_ARB_map_buffer_range
src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, sizeof(U16)*mNumIndices,
GL_MAP_WRITE_BIT |
GL_MAP_FLUSH_EXPLICIT_BIT);
#endif
}
}
else if (gGLManager.mHasFlushBufferRange)
{
if (map_range)
{
glBufferParameteriAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_BUFFER_SERIALIZED_MODIFY_APPLE, GL_FALSE);
glBufferParameteriAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE);
src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
else
{
src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
}
else
{
map_range = false;
src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
}
mMappedIndexData = src; //LL_NEXT_ALIGNED_ADDRESS<U8>(src);
mAlignedIndexOffset = mMappedIndexData - src;
stop_glerror();
}
}
if (!mMappedIndexData)
{
log_glerror();
if(!sDisableVBOMapping)
{
GLint buff;
glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
if ((GLuint)buff != mGLIndices)
{
llerrs << "Invalid GL index buffer bound: " << buff << llendl;
}
llerrs << "glMapBuffer returned NULL (no index data)" << llendl;
}
else
{
llerrs << "memory allocation for Index data failed. " << llendl ;
}
}
}
else
{
map_range = false;
}
if (map_range && gGLManager.mHasMapBufferRange && !sDisableVBOMapping)
{
return mMappedIndexData;
}
else
{
return mMappedIndexData + sizeof(U16)*index;
}
}
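// Flush and release any outstanding mappings. 'type' selects the vertex side,
// the index side (TYPE_INDEX), or both (negative). With VBO mapping disabled
// the recorded regions (or the whole buffer) are uploaded via glBufferSubDataARB;
// otherwise the mapped ranges are flushed and the buffers unmapped.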
void LLVertexBuffer::unmapBuffer(S32 type)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_UNMAP_BUFFER);
if (!useVBOs() || type == -2)
{
return ; //nothing to unmap
}
bool updated_all = false ;
if (mMappedData && mVertexLocked && type != TYPE_INDEX)
{
updated_all = (mIndexLocked && type < 0) ; //both vertex and index buffers done updating
if(sDisableVBOMapping)
{
if (!mMappedVertexRegions.empty())
{
stop_glerror();
for (U32 i = 0; i < mMappedVertexRegions.size(); ++i)
{
const MappedRegion& region = mMappedVertexRegions[i];
S32 offset = region.mIndex >= 0 ? mOffsets[region.mType]+sTypeSize[region.mType]*region.mIndex : 0;
S32 length = sTypeSize[region.mType]*region.mCount;
glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, offset, length, mMappedData+offset);
stop_glerror();
}
mMappedVertexRegions.clear();
}
else
{
stop_glerror();
glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData);
stop_glerror();
}
}
else
{
if (gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (!mMappedVertexRegions.empty())
{
stop_glerror();
for (U32 i = 0; i < mMappedVertexRegions.size(); ++i)
{
const MappedRegion& region = mMappedVertexRegions[i];
S32 offset = region.mIndex >= 0 ? mOffsets[region.mType]+sTypeSize[region.mType]*region.mIndex : 0;
S32 length = sTypeSize[region.mType]*region.mCount;
if (gGLManager.mHasMapBufferRange)
{
#ifdef GL_ARB_map_buffer_range
glFlushMappedBufferRange(GL_ARRAY_BUFFER_ARB, offset, length);
#endif
}
else if (gGLManager.mHasFlushBufferRange)
{
glFlushMappedBufferRangeAPPLE(GL_ARRAY_BUFFER_ARB, offset, length);
}
stop_glerror();
}
mMappedVertexRegions.clear();
}
}
stop_glerror();
glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
stop_glerror();
mMappedData = NULL;
}
mVertexLocked = FALSE ;
sMappedCount--;
}
if (mMappedIndexData && mIndexLocked && (type < 0 || type == TYPE_INDEX))
{
if(sDisableVBOMapping)
{
if (!mMappedIndexRegions.empty())
{
for (U32 i = 0; i < mMappedIndexRegions.size(); ++i)
{
const MappedRegion& region = mMappedIndexRegions[i];
S32 offset = region.mIndex >= 0 ? sizeof(U16)*region.mIndex : 0;
S32 length = sizeof(U16)*region.mCount;
glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length, mMappedIndexData+offset);
stop_glerror();
}
mMappedIndexRegions.clear();
}
else
{
stop_glerror();
glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData);
stop_glerror();
}
}
else
{
if (gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (!mMappedIndexRegions.empty())
{
for (U32 i = 0; i < mMappedIndexRegions.size(); ++i)
{
const MappedRegion& region = mMappedIndexRegions[i];
S32 offset = region.mIndex >= 0 ? sizeof(U16)*region.mIndex : 0;
S32 length = sizeof(U16)*region.mCount;
if (gGLManager.mHasMapBufferRange)
{
#ifdef GL_ARB_map_buffer_range
glFlushMappedBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length);
#endif
}
else if (gGLManager.mHasFlushBufferRange)
{
#ifdef GL_APPLE_flush_buffer_range
glFlushMappedBufferRangeAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length);
#endif
}
stop_glerror();
}
mMappedIndexRegions.clear();
}
}
stop_glerror();
glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
stop_glerror();
mMappedIndexData = NULL ;
}
mIndexLocked = FALSE ;
sMappedCount--;
}
if(updated_all)
{
if(mUsage == GL_STATIC_DRAW_ARB)
{
//static draw buffers can only be mapped a single time
//throw out client data (we won't be using it again)
mEmpty = TRUE;
mFinal = TRUE;
if(sDisableVBOMapping)
{
freeClientBuffer() ;
}
}
else
{
mEmpty = FALSE;
}
}
}
//----------------------------------------------------------------------------
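// VertexBufferStrider maps the requested range of a buffer and wraps the
// returned pointer in an LLStrider with the right stride for the attribute
// type (stride 0 for the tightly packed index array).
//
// Minimal usage sketch (hypothetical 'vb' pointer and 'positions' array,
// assuming the buffer was created with a vertex/position channel):
//
//   LLStrider<LLVector3> verts;
//   if (vb->getVertexStrider(verts, first_vert, vert_count, true))
//   {
//       for (S32 i = 0; i < vert_count; ++i)
//       {
//           *verts++ = positions[i];
//       }
//   }
//   //the written range is flushed the next time the buffer is bound via setBuffer()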
template <class T,S32 type> struct VertexBufferStrider
{
typedef LLStrider<T> strider_t;
static bool get(LLVertexBuffer& vbo,
strider_t& strider,
S32 index, S32 count, bool map_range)
{
if (type == LLVertexBuffer::TYPE_INDEX)
{
U8* ptr = vbo.mapIndexBuffer(index, count, map_range);
if (ptr == NULL)
{
llwarns << "mapIndexBuffer failed!" << llendl;
return FALSE;
}
strider = (T*)ptr;
strider.setStride(0);
return TRUE;
}
else if (vbo.hasDataType(type))
{
S32 stride = LLVertexBuffer::sTypeSize[type];
U8* ptr = vbo.mapVertexBuffer(type, index, count, map_range);
if (ptr == NULL)
{
llwarns << "mapVertexBuffer failed!" << llendl;
return FALSE;
}
strider = (T*)ptr;
strider.setStride(stride);
return TRUE;
}
else
{
llerrs << "VertexBufferStrider could not find valid vertex data." << llendl;
}
return FALSE;
}
};
bool LLVertexBuffer::getVertexStrider(LLStrider<LLVector3>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<LLVector3,TYPE_VERTEX>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getIndexStrider(LLStrider<U16>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<U16,TYPE_INDEX>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getTexCoord0Strider(LLStrider<LLVector2>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<LLVector2,TYPE_TEXCOORD0>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getTexCoord1Strider(LLStrider<LLVector2>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<LLVector2,TYPE_TEXCOORD1>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getNormalStrider(LLStrider<LLVector3>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<LLVector3,TYPE_NORMAL>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getBinormalStrider(LLStrider<LLVector3>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<LLVector3,TYPE_BINORMAL>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getColorStrider(LLStrider<LLColor4U>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<LLColor4U,TYPE_COLOR>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getEmissiveStrider(LLStrider<U8>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<U8,TYPE_EMISSIVE>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getWeightStrider(LLStrider<F32>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<F32,TYPE_WEIGHT>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getWeight4Strider(LLStrider<LLVector4>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<LLVector4,TYPE_WEIGHT4>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getClothWeightStrider(LLStrider<LLVector4>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<LLVector4,TYPE_CLOTHWEIGHT>::get(*this, strider, index, count, map_range);
}
//----------------------------------------------------------------------------
// Set for rendering
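// Bind this buffer for rendering (data_mask == 0 just flushes/unbinds).
// Binds the VBO/IBO when they are not already current, re-specifies GL storage
// if the buffer was resized, unmaps any pending mapped ranges, and re-sends
// vertex pointers when the data mask or binding changed. In debug builds the
// bound GL buffers and the shader's attribute consumption are validated.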
void LLVertexBuffer::setBuffer(U32 data_mask, S32 type)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_SET_BUFFER);
//set up pointers if the data mask is different ...
BOOL setup = (sLastMask != data_mask);
if (gDebugGL && data_mask != 0)
{
LLGLSLShader* shader = LLGLSLShader::sCurBoundShaderPtr;
if (shader)
{
U32 required_mask = 0;
for (U32 i = 0; i < LLVertexBuffer::TYPE_MAX; ++i)
{
if (shader->getAttribLocation(i) > -1)
{
U32 required = 1 << i;
if ((data_mask & required) == 0)
{
llwarns << "Missing attribute: " << i << llendl;
}
required_mask |= required;
}
}
if ((data_mask & required_mask) != required_mask)
{
llerrs << "Shader consumption mismatches data provision." << llendl;
}
}
}
if (useVBOs())
{
if (mGLBuffer && (mGLBuffer != sGLRenderBuffer || !sVBOActive))
{
/*if (sMapped)
{
llerrs << "VBO bound while another VBO mapped!" << llendl;
}*/
glBindBufferARB(GL_ARRAY_BUFFER_ARB, mGLBuffer);
sBindCount++;
sVBOActive = TRUE;
setup = TRUE; // ... or the bound buffer changed
}
if (mGLIndices && (mGLIndices != sGLRenderIndices || !sIBOActive))
{
/*if (sMapped)
{
llerrs << "VBO bound while another VBO mapped!" << llendl;
}*/
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, mGLIndices);
sBindCount++;
sIBOActive = TRUE;
}
BOOL error = FALSE;
if (gDebugGL)
{
GLint buff;
glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
if ((GLuint)buff != mGLBuffer)
{
if (gDebugSession)
{
error = TRUE;
gFailLog << "Invalid GL vertex buffer bound: " << buff << std::endl;
}
else
{
llerrs << "Invalid GL vertex buffer bound: " << buff << llendl;
}
}
glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
if ((GLuint)buff != mGLIndices)
{
if (gDebugSession)
{
error = TRUE;
gFailLog << "Invalid GL index buffer bound: " << buff << std::endl;
}
else
{
llerrs << "Invalid GL index buffer bound: " << buff << llendl;
}
}
}
if (mResized)
{
if (gDebugGL)
{
GLint buff;
glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
if ((GLuint)buff != mGLBuffer)
{
if (gDebugSession)
{
error = TRUE;
gFailLog << "Invalid GL vertex buffer bound: " << buff << std::endl;
}
else
{
llerrs << "Invalid GL vertex buffer bound: " << buff << llendl;
}
}
if (mGLIndices != 0)
{
glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
if ((GLuint)buff != mGLIndices)
{
if (gDebugSession)
{
error = TRUE;
gFailLog << "Invalid GL index buffer bound: " << buff << std::endl;
}
else
{
llerrs << "Invalid GL index buffer bound: " << buff << llendl;
}
}
}
}
if (mGLBuffer)
{
glBufferDataARB(GL_ARRAY_BUFFER_ARB, getSize(), NULL, mUsage);
}
if (mGLIndices)
{
glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, getIndicesSize(), NULL, mUsage);
}
mEmpty = TRUE;
mResized = FALSE;
if (data_mask != 0)
{
if (gDebugSession)
{
error = TRUE;
gFailLog << "Buffer set for rendering before being filled after resize." << std::endl;
}
else
{
llerrs << "Buffer set for rendering before being filled after resize." << llendl;
}
}
}
if (error)
{
ll_fail("LLVertexBuffer::mapBuffer failed");
}
unmapBuffer(type);
}
else
{
if (mGLBuffer)
{
if (sVBOActive)
{
glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
sBindCount++;
sVBOActive = FALSE;
setup = TRUE; // ... or a VBO is deactivated
}
if (sGLRenderBuffer != mGLBuffer)
{
setup = TRUE; // ... or a client memory pointer changed
}
}
if (mGLIndices && sIBOActive)
{
/*if (sMapped)
{
llerrs << "VBO unbound while potentially mapped!" << llendl;
}*/
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
sBindCount++;
sIBOActive = FALSE;
}
}
setupClientArrays(data_mask);
if (mGLIndices)
{
sGLRenderIndices = mGLIndices;
}
if (mGLBuffer)
{
sGLRenderBuffer = mGLBuffer;
if (data_mask && setup)
{
setupVertexBuffer(data_mask); // subclass specific setup (virtual function)
sSetCount++;
}
}
}
// virtual (default)
void LLVertexBuffer::setupVertexBuffer(U32 data_mask) const
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_SETUP_VERTEX_BUFFER);
U8* base = useVBOs() ? (U8*) mAlignedOffset : mMappedData;
if ((data_mask & mTypeMask) != data_mask)
{
llerrs << "LLVertexBuffer::setupVertexBuffer missing required components for supplied data mask." << llendl;
}
LLGLSLShader* shader = LLGLSLShader::sCurBoundShaderPtr;
//assert that fixed function is allowed OR a shader is currently bound
llassert(!LLGLSLShader::sNoFixedFunction || shader != NULL);
if (data_mask & MAP_NORMAL)
{
S32 loc = -1;
if (shader)
{
loc = shader->getAttribLocation(TYPE_NORMAL);
}