Newer
Older
/**
* @file llvertexbuffer.cpp
* @brief LLVertexBuffer implementation
*
* $LicenseInfo:firstyear=2003&license=viewerlgpl$
* Copyright (C) 2010, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
#include "linden_common.h"
#include "llfasttimer.h"
#include "llsys.h"
#include "llvertexbuffer.h"
// #include "llrender.h"
#include "llglheaders.h"
#include "llrender.h"
David Parks
committed
#include "llshadermgr.h"
#include "llglslshader.h"
//Next Highest Power Of Two
//helper function, returns first number > v that is a power of 2, or v if v is already a power of 2
U32 nhpo2(U32 v)
{
	//bit-smearing "round up to power of two".
	//The previous doubling loop (r *= 2 while r < v) never terminated for
	//v > 0x80000000: r overflows to 0 and the loop spins forever. This
	//variant always terminates; for such v (no representable U32 power of
	//two exists) it returns 0.
	if (v <= 1)
	{ //preserve original behavior: nhpo2(0) == nhpo2(1) == 1
		return 1;
	}
	--v;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}
//which power of 2 is i?
//assumes i is a power of 2 > 0
U32 wpo2(U32 i)
{
	//return log2(i) for an exact power of two i (i.e. the exponent e with 2^e == i)
	llassert(i > 0);
	llassert(nhpo2(i) == i);

	U32 exponent = 0;
	for (U32 v = i >> 1; v != 0; v >>= 1)
	{
		++exponent;
	}
	return exponent;
}
const U32 LL_VBO_BLOCK_SIZE = 2048;
David Parks
committed
const U32 LL_VBO_POOL_MAX_SEED_SIZE = 256*1024;
U32 vbo_block_size(U32 size)
{ //round size up to the next multiple of LL_VBO_BLOCK_SIZE (exact multiples pass through unchanged)
	const U32 remainder = size % LL_VBO_BLOCK_SIZE;
	if (remainder == 0)
	{
		return size;
	}
	return size + (LL_VBO_BLOCK_SIZE - remainder);
}
U32 vbo_block_index(U32 size)
{
	//free-list bucket index for a request: pad to a block multiple, then divide by block size
	const U32 padded = vbo_block_size(size);
	return padded / LL_VBO_BLOCK_SIZE;
}
David Parks
committed
const U32 LL_VBO_POOL_SEED_COUNT = vbo_block_index(LL_VBO_POOL_MAX_SEED_SIZE);
//============================================================================
//static
Leslie Linden
committed
LLVBOPool LLVertexBuffer::sStreamVBOPool(GL_STREAM_DRAW_ARB, GL_ARRAY_BUFFER_ARB);
LLVBOPool LLVertexBuffer::sDynamicVBOPool(GL_DYNAMIC_DRAW_ARB, GL_ARRAY_BUFFER_ARB);
LLVBOPool LLVertexBuffer::sDynamicCopyVBOPool(GL_DYNAMIC_COPY_ARB, GL_ARRAY_BUFFER_ARB);
Leslie Linden
committed
LLVBOPool LLVertexBuffer::sStreamIBOPool(GL_STREAM_DRAW_ARB, GL_ELEMENT_ARRAY_BUFFER_ARB);
LLVBOPool LLVertexBuffer::sDynamicIBOPool(GL_DYNAMIC_DRAW_ARB, GL_ELEMENT_ARRAY_BUFFER_ARB);
U32 LLVBOPool::sBytesPooled = 0;
U32 LLVBOPool::sIndexBytesPooled = 0;
David Parks
committed
std::list<U32> LLVertexBuffer::sAvailableVAOName;
U32 LLVertexBuffer::sCurVAOName = 1;
U32 LLVertexBuffer::sAllocatedIndexBytes = 0;
U32 LLVertexBuffer::sIndexCount = 0;
U32 LLVertexBuffer::sBindCount = 0;
U32 LLVertexBuffer::sSetCount = 0;
S32 LLVertexBuffer::sCount = 0;
S32 LLVertexBuffer::sGLCount = 0;
S32 LLVertexBuffer::sMappedCount = 0;
bool LLVertexBuffer::sDisableVBOMapping = false;
bool LLVertexBuffer::sEnableVBOs = true;
Josh Bell
committed
U32 LLVertexBuffer::sGLRenderBuffer = 0;
U32 LLVertexBuffer::sGLRenderArray = 0;
Josh Bell
committed
U32 LLVertexBuffer::sGLRenderIndices = 0;
U32 LLVertexBuffer::sLastMask = 0;
bool LLVertexBuffer::sVBOActive = false;
bool LLVertexBuffer::sIBOActive = false;
U32 LLVertexBuffer::sAllocatedBytes = 0;
U32 LLVertexBuffer::sVertexCount = 0;
bool LLVertexBuffer::sMapped = false;
bool LLVertexBuffer::sUseStreamDraw = true;
bool LLVertexBuffer::sUseVAO = false;
bool LLVertexBuffer::sPreferStreamDraw = false;
David Parks
committed
David Parks
committed
U32 LLVBOPool::genBuffer()
{
	//mint a fresh GL buffer object name
	U32 ret = 0;
	glGenBuffersARB(1, &ret);
	return ret;
}
void LLVBOPool::deleteBuffer(U32 name)
{
	//orphan the buffer's storage, then delete the GL name.
	//Skipped entirely once the GL manager has shut down (context gone).
	if (gGLManager.mInited)
	{
		LLVertexBuffer::unbind();

		glBindBufferARB(mType, name);
		//size-0 glBufferData releases the driver-side storage before delete
		glBufferDataARB(mType, 0, NULL, mUsage);
		glBindBufferARB(mType, 0);

		glDeleteBuffersARB(1, &name);
	}
}
LLVBOPool::LLVBOPool(U32 vboUsage, U32 vboType)
: mUsage(vboUsage), mType(vboType)
{
	//one miss counter per seed-able block-size bucket, all starting at zero
	mMissCount.assign(LL_VBO_POOL_SEED_COUNT, 0);
}
volatile U8* LLVBOPool::allocate(U32& name, U32 size, bool for_seed)
llassert(vbo_block_size(size) == size);
volatile U8* ret = NULL;
U32 i = vbo_block_index(size);
if (mFreeList.size() <= i)
{
mFreeList.resize(i+1);
}
David Parks
committed
if (mFreeList[i].empty() || for_seed)
{
//make a new buffer
name = genBuffer();
glBindBufferARB(mType, name);
David Parks
committed
if (!for_seed && i < LL_VBO_POOL_SEED_COUNT)
{ //record this miss
mMissCount[i]++;
}
if (mType == GL_ARRAY_BUFFER_ARB)
{
LLVertexBuffer::sAllocatedBytes += size;
}
else
{
LLVertexBuffer::sAllocatedIndexBytes += size;
}
if (LLVertexBuffer::sDisableVBOMapping || mUsage != GL_DYNAMIC_DRAW_ARB)
glBufferDataARB(mType, size, 0, mUsage);
if (mUsage != GL_DYNAMIC_COPY_ARB)
{ //data will be provided by application
Richard Linden
committed
ret = (U8*) ll_aligned_malloc<64>(size);
LL_ERRS() << "Failed to allocate "<< size << " bytes for LLVBOPool buffer " << name <<"." << LL_NEWLINE
<< "Free list size: " << mFreeList.size() // this happens if we are out of memory so a solution might be to clear some from freelist
<< " Allocated Bytes: " << LLVertexBuffer::sAllocatedBytes
<< " Allocated Index Bytes: " << LLVertexBuffer::sAllocatedIndexBytes
<< " Pooled Bytes: " << sBytesPooled
<< " Pooled Index Bytes: " << sIndexBytesPooled
else
{ //always use a true hint of static draw when allocating non-client-backed buffers
glBufferDataARB(mType, size, nullptr, GL_STATIC_DRAW_ARB);
}
glBindBufferARB(mType, 0);
David Parks
committed
if (for_seed)
{ //put into pool for future use
llassert(mFreeList.size() > i);
Record rec;
rec.mGLName = name;
rec.mClientData = ret;
if (mType == GL_ARRAY_BUFFER_ARB)
{
sBytesPooled += size;
}
else
{
sIndexBytesPooled += size;
}
mFreeList[i].push_back(rec);
}
}
else
{
name = mFreeList[i].front().mGLName;
ret = mFreeList[i].front().mClientData;
if (mType == GL_ARRAY_BUFFER_ARB)
{
sBytesPooled -= size;
}
else
{
sIndexBytesPooled -= size;
}
mFreeList[i].pop_front();
}
return ret;
}
void LLVBOPool::release(U32 name, volatile U8* buffer, U32 size)
llassert(vbo_block_size(size) == size);
David Parks
committed
deleteBuffer(name);
if (mType == GL_ARRAY_BUFFER_ARB)
{
LLVertexBuffer::sAllocatedBytes -= size;
}
else
{
LLVertexBuffer::sAllocatedIndexBytes -= size;
}
}
David Parks
committed
void LLVBOPool::seedPool()
{
U32 dummy_name = 0;
if (mFreeList.size() < LL_VBO_POOL_SEED_COUNT)
{
mFreeList.resize(LL_VBO_POOL_SEED_COUNT);
}
for (U32 i = 0; i < LL_VBO_POOL_SEED_COUNT; i++)
{
if (mMissCount[i] > mFreeList[i].size())
{
U32 size = i*LL_VBO_BLOCK_SIZE;
S32 count = mMissCount[i] - mFreeList[i].size();
for (U32 j = 0; j < count; ++j)
{
allocate(dummy_name, size, true);
}
}
}
}
simon@Simon-PC.lindenlab.com
committed
void LLVBOPool::cleanup()
{
David Parks
committed
U32 size = LL_VBO_BLOCK_SIZE;
for (U32 i = 0; i < mFreeList.size(); ++i)
{
record_list_t& l = mFreeList[i];
while (!l.empty())
{
Record& r = l.front();
David Parks
committed
deleteBuffer(r.mGLName);
if (r.mClientData)
{
Richard Linden
committed
ll_aligned_free<64>((void*) r.mClientData);
}
l.pop_front();
if (mType == GL_ARRAY_BUFFER_ARB)
{
sBytesPooled -= size;
LLVertexBuffer::sAllocatedBytes -= size;
}
else
{
sIndexBytesPooled -= size;
LLVertexBuffer::sAllocatedIndexBytes -= size;
}
}
David Parks
committed
size += LL_VBO_BLOCK_SIZE;
David Parks
committed
//reset miss counts
std::fill(mMissCount.begin(), mMissCount.end(), 0);
}
David Parks
committed
//NOTE: each component must be AT LEAST 4 bytes in size to avoid a performance penalty on AMD hardware
const S32 LLVertexBuffer::sTypeSize[LLVertexBuffer::TYPE_MAX] =
sizeof(LLVector4), // TYPE_VERTEX,
sizeof(LLVector4), // TYPE_NORMAL,
sizeof(LLVector2), // TYPE_TEXCOORD0,
sizeof(LLVector2), // TYPE_TEXCOORD1,
sizeof(LLVector2), // TYPE_TEXCOORD2,
sizeof(LLVector2), // TYPE_TEXCOORD3,
sizeof(LLColor4U), // TYPE_COLOR,
David Parks
committed
sizeof(LLColor4U), // TYPE_EMISSIVE, only alpha is used currently
David Parks
committed
sizeof(LLVector4), // TYPE_TANGENT,
sizeof(F32), // TYPE_WEIGHT,
sizeof(LLVector4), // TYPE_CLOTHWEIGHT,
sizeof(LLVector4), // TYPE_TEXTURE_INDEX (actually exists as position.w), no extra data, but stride is 16 bytes
{
"TYPE_VERTEX",
"TYPE_NORMAL",
"TYPE_TEXCOORD0",
"TYPE_TEXCOORD1",
"TYPE_TEXCOORD2",
"TYPE_TEXCOORD3",
"TYPE_COLOR",
"TYPE_EMISSIVE",
David Parks
committed
"TYPE_TANGENT",
"TYPE_WEIGHT",
"TYPE_WEIGHT4",
"TYPE_CLOTHWEIGHT",
"TYPE_TEXTURE_INDEX",
"TYPE_MAX",
"TYPE_INDEX",
};
//map LLRender primitive modes to their GL equivalents; order must match LLRender::NUM_MODES
const U32 LLVertexBuffer::sGLMode[LLRender::NUM_MODES] =
{
	GL_TRIANGLES,
	GL_TRIANGLE_STRIP,
	GL_TRIANGLE_FAN,
	GL_POINTS,
	GL_LINES,
	GL_LINE_STRIP,
	GL_QUADS,
	GL_LINE_LOOP,
};
//static
U32 LLVertexBuffer::getVAOName()
{
	//hand out a vertex array object name, preferring recycled names over
	//minting new ones from GL
	U32 ret = 0;

	if (!sAvailableVAOName.empty())
	{
		ret = sAvailableVAOName.front();
		sAvailableVAOName.pop_front();
	}
	else
	{
		glGenVertexArrays(1, &ret);
	}

	return ret;
}
//static
// Return a VAO name to the recycle list; getVAOName() will hand it out again
// before asking GL for a new one. The GL object itself is not deleted here.
void LLVertexBuffer::releaseVAOName(U32 name)
{
sAvailableVAOName.push_back(name);
}
David Parks
committed
//static
void LLVertexBuffer::seedPools()
{
sStreamVBOPool.seedPool();
sDynamicVBOPool.seedPool();
sDynamicCopyVBOPool.seedPool();
David Parks
committed
sStreamIBOPool.seedPool();
sDynamicIBOPool.seedPool();
}
void LLVertexBuffer::setupClientArrays(U32 data_mask)
David Parks
committed
if (sLastMask != data_mask)
David Parks
committed
David Parks
committed
if (gGLManager.mGLSLVersionMajor < 2 && gGLManager.mGLSLVersionMinor < 30)
{
//make sure texture index is disabled
data_mask = data_mask & ~MAP_TEXTURE_INDEX;
}
if (LLGLSLShader::sNoFixedFunction)
{
for (U32 i = 0; i < TYPE_MAX; ++i)
S32 loc = i;
U32 mask = 1 << i;
if (sLastMask & (1 << i))
{ //was enabled
if (!(data_mask & mask))
{ //needs to be disabled
glDisableVertexAttribArrayARB(loc);
}
}
else
{ //was disabled
if (data_mask & mask)
{ //needs to be enabled
glEnableVertexAttribArrayARB(loc);
}
}
}
}
else
{
static const GLenum array[] =
{
GL_VERTEX_ARRAY,
GL_NORMAL_ARRAY,
GL_TEXTURE_COORD_ARRAY,
GL_COLOR_ARRAY,
};
static const GLenum mask[] =
{
MAP_VERTEX,
MAP_NORMAL,
MAP_TEXCOORD0,
MAP_COLOR
};
for (U32 i = 0; i < 4; ++i)
{
if (sLastMask & mask[i])
{ //was enabled
if (!(data_mask & mask[i]))
{ //needs to be disabled
glDisableClientState(array[i]);
}
else if (gDebugGL)
{ //needs to be enabled, make sure it was (DEBUG)
if (!glIsEnabled(array[i]))
{
if (gDebugSession)
{
gFailLog << "Bad client state! " << array[i] << " disabled." << std::endl;
}
else
{
LL_ERRS() << "Bad client state! " << array[i] << " disabled." << LL_ENDL;
}
}
}
else
{ //was disabled
if (data_mask & mask[i])
{ //needs to be enabled
glEnableClientState(array[i]);
}
else if (gDebugGL && glIsEnabled(array[i]))
{ //needs to be disabled, make sure it was (DEBUG TEMPORARY)
if (gDebugSession)
{
gFailLog << "Bad client state! " << array[i] << " enabled." << std::endl;
}
else
{
LL_ERRS() << "Bad client state! " << array[i] << " enabled." << LL_ENDL;
static const U32 map_tc[] =
{
MAP_TEXCOORD1,
MAP_TEXCOORD2,
MAP_TEXCOORD3
};
David Parks
committed
for (U32 i = 0; i < 3; i++)
if (sLastMask & map_tc[i])
{
if (!(data_mask & map_tc[i]))
{ //disable
glClientActiveTextureARB(GL_TEXTURE1_ARB+i);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glClientActiveTextureARB(GL_TEXTURE0_ARB);
}
else if (data_mask & map_tc[i])
{
glClientActiveTextureARB(GL_TEXTURE1_ARB+i);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glClientActiveTextureARB(GL_TEXTURE0_ARB);
}
}
David Parks
committed
if (sLastMask & MAP_TANGENT)
David Parks
committed
if (!(data_mask & MAP_TANGENT))
{
glClientActiveTextureARB(GL_TEXTURE2_ARB);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glClientActiveTextureARB(GL_TEXTURE0_ARB);
}
David Parks
committed
else if (data_mask & MAP_TANGENT)
glClientActiveTextureARB(GL_TEXTURE2_ARB);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glClientActiveTextureARB(GL_TEXTURE0_ARB);
David Parks
committed
sLastMask = data_mask;
static LLTrace::BlockTimerStatHandle FTM_VB_DRAW_ARRAYS("drawArrays");
void LLVertexBuffer::drawArrays(U32 mode, const std::vector<LLVector3>& pos, const std::vector<LLVector3>& norm)
//LL_RECORD_BLOCK_TIME(FTM_VB_DRAW_ARRAYS);
David Parks
committed
llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);
gGL.syncMatrices();
David Parks
committed
llassert(norm.size() >= pos.size());
llassert(count > 0);
if( count == 0 )
{
LL_WARNS() << "Called drawArrays with 0 vertices" << LL_ENDL;
return;
}
if( norm.size() < pos.size() )
{
LL_WARNS() << "Called drawArrays with #" << norm.size() << " normals and #" << pos.size() << " vertices" << LL_ENDL;
return;
}
unbind();
setupClientArrays(MAP_VERTEX | MAP_NORMAL);
David Parks
committed
LLGLSLShader* shader = LLGLSLShader::sCurBoundShaderPtr;
if (shader)
{
David Parks
committed
S32 loc = LLVertexBuffer::TYPE_VERTEX;
David Parks
committed
if (loc > -1)
{
glVertexAttribPointerARB(loc, 3, GL_FLOAT, GL_FALSE, 0, pos[0].mV);
}
David Parks
committed
loc = LLVertexBuffer::TYPE_NORMAL;
David Parks
committed
if (loc > -1)
{
glVertexAttribPointerARB(loc, 3, GL_FLOAT, GL_FALSE, 0, norm[0].mV);
}
}
else
{
glVertexPointer(3, GL_FLOAT, 0, pos[0].mV);
glNormalPointer(GL_FLOAT, 0, norm[0].mV);
}
David Parks
committed
LLGLSLShader::startProfile();
glDrawArrays(sGLMode[mode], 0, count);
David Parks
committed
LLGLSLShader::stopProfile(count, mode);
David Parks
committed
//static
// Immediate-mode style indexed draw from client memory arrays.
// pos is an LLVector4a array (16-byte stride, xyz used); tc is optional.
void LLVertexBuffer::drawElements(U32 mode, const LLVector4a* pos, const LLVector2* tc, S32 num_indices, const U16* indicesp)
{
	llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);

	gGL.syncMatrices();

	//vertex positions are mandatory; texcoords ride along only when provided
	U32 mask = LLVertexBuffer::MAP_VERTEX;
	if (tc)
	{
		mask = mask | LLVertexBuffer::MAP_TEXCOORD0;
	}

	unbind();

	setupClientArrays(mask);

	if (LLGLSLShader::sNoFixedFunction)
	{
		S32 loc = LLVertexBuffer::TYPE_VERTEX;
		//stride of 16: positions are LLVector4a, only xyz consumed
		glVertexAttribPointerARB(loc, 3, GL_FLOAT, GL_FALSE, 16, pos);

		if (tc)
		{
			loc = LLVertexBuffer::TYPE_TEXCOORD0;
			glVertexAttribPointerARB(loc, 2, GL_FLOAT, GL_FALSE, 0, tc);
		}
	}
	else
	{
		glTexCoordPointer(2, GL_FLOAT, 0, tc);
		glVertexPointer(3, GL_FLOAT, 16, pos);
	}

	LLGLSLShader::startProfile();
	glDrawElements(sGLMode[mode], num_indices, GL_UNSIGNED_SHORT, indicesp);
	LLGLSLShader::stopProfile(num_indices, mode);
}
void LLVertexBuffer::validateRange(U32 start, U32 end, U32 count, U32 indices_offset) const
llassert(start < (U32)mNumVerts);
llassert(end < (U32)mNumVerts);
if (start >= (U32) mNumVerts ||
end >= (U32) mNumVerts)
LL_ERRS() << "Bad vertex buffer draw range: [" << start << ", " << end << "] vs " << mNumVerts << LL_ENDL;
llassert(mNumIndices >= 0);
if (indices_offset >= (U32) mNumIndices ||
indices_offset + count > (U32) mNumIndices)
LL_ERRS() << "Bad index buffer draw range: [" << indices_offset << ", " << indices_offset+count << "]" << LL_ENDL;
if (gDebugGL && !useVBOs())
{
U16* idx = ((U16*) getIndicesPointer())+indices_offset;
for (U32 i = 0; i < count; ++i)
{
llassert(idx[i] >= start);
llassert(idx[i] <= end);
if (idx[i] < start || idx[i] > end)
{
LL_ERRS() << "Index out of range: " << idx[i] << " not in [" << start << ", " << end << "]" << LL_ENDL;
LLGLSLShader* shader = LLGLSLShader::sCurBoundShaderPtr;
if (shader && shader->mFeatures.mIndexedTextureChannels > 1)
{
LLStrider<LLVector4a> v;
//hack to get non-const reference
LLVertexBuffer* vb = (LLVertexBuffer*) this;
vb->getVertexStrider(v);
for (U32 i = start; i < end; i++)
{
S32 idx = (S32) (v[i][3]+0.25f);
llassert(idx >= 0);
if (idx < 0 || idx >= shader->mFeatures.mIndexedTextureChannels)
{
LL_ERRS() << "Bad texture index found in vertex data stream." << LL_ENDL;
}
}
}
}
}
void LLVertexBuffer::drawRange(U32 mode, U32 start, U32 end, U32 count, U32 indices_offset) const
{
validateRange(start, end, count, indices_offset);
Leslie Linden
committed
mMappable = false;
gGL.syncMatrices();
llassert(mNumVerts >= 0);
David Parks
committed
llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);
David Parks
committed
if (mGLArray)
David Parks
committed
if (mGLArray != sGLRenderArray)
{
LL_ERRS() << "Wrong vertex array bound." << LL_ENDL;
David Parks
committed
}
David Parks
committed
else
{
if (mGLIndices != sGLRenderIndices)
{
LL_ERRS() << "Wrong index buffer bound." << LL_ENDL;
David Parks
committed
}
David Parks
committed
if (mGLBuffer != sGLRenderBuffer)
{
LL_ERRS() << "Wrong vertex buffer bound." << LL_ENDL;
David Parks
committed
}
}
if (gDebugGL && !mGLArray && useVBOs())
David Parks
committed
GLint elem = 0;
glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &elem);
if (elem != mGLIndices)
{
LL_ERRS() << "Wrong index buffer bound!" << LL_ENDL;
David Parks
committed
}
LL_ERRS() << "Invalid draw mode: " << mode << LL_ENDL;
stop_glerror();
David Parks
committed
LLGLSLShader::startProfile();
glDrawRangeElements(sGLMode[mode], start, end, count, GL_UNSIGNED_SHORT,
David Parks
committed
LLGLSLShader::stopProfile(count, mode);
David Parks
committed
David Parks
committed
placeFence();
}
void LLVertexBuffer::draw(U32 mode, U32 count, U32 indices_offset) const
{
David Parks
committed
llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);
Leslie Linden
committed
mMappable = false;
gGL.syncMatrices();
llassert(mNumIndices >= 0);
if (indices_offset >= (U32) mNumIndices ||
indices_offset + count > (U32) mNumIndices)
LL_ERRS() << "Bad index buffer draw range: [" << indices_offset << ", " << indices_offset+count << "]" << LL_ENDL;
David Parks
committed
if (mGLArray)
David Parks
committed
if (mGLArray != sGLRenderArray)
{
LL_ERRS() << "Wrong vertex array bound." << LL_ENDL;
David Parks
committed
}
David Parks
committed
else
David Parks
committed
if (mGLIndices != sGLRenderIndices)
{
LL_ERRS() << "Wrong index buffer bound." << LL_ENDL;
David Parks
committed
}
if (mGLBuffer != sGLRenderBuffer)
{
LL_ERRS() << "Wrong vertex buffer bound." << LL_ENDL;
David Parks
committed
}
LL_ERRS() << "Invalid draw mode: " << mode << LL_ENDL;
stop_glerror();
David Parks
committed
LLGLSLShader::startProfile();
glDrawElements(sGLMode[mode], count, GL_UNSIGNED_SHORT,
((U16*) getIndicesPointer()) + indices_offset);
David Parks
committed
LLGLSLShader::stopProfile(count, mode);
stop_glerror();
David Parks
committed
placeFence();
static LLTrace::BlockTimerStatHandle FTM_GL_DRAW_ARRAYS("GL draw arrays");
void LLVertexBuffer::drawArrays(U32 mode, U32 first, U32 count) const
{
David Parks
committed
llassert(!LLGLSLShader::sNoFixedFunction || LLGLSLShader::sCurBoundShaderPtr != NULL);
Leslie Linden
committed
mMappable = false;
gGL.syncMatrices();
llassert(mNumVerts >= 0);
if (first >= (U32) mNumVerts ||
first + count > (U32) mNumVerts)
LL_ERRS() << "Bad vertex buffer draw range: [" << first << ", " << first+count << "]" << LL_ENDL;
David Parks
committed
if (mGLArray)
David Parks
committed
if (mGLArray != sGLRenderArray)
{
LL_ERRS() << "Wrong vertex array bound." << LL_ENDL;
David Parks
committed
}
}
else
{
if (mGLBuffer != sGLRenderBuffer || useVBOs() != sVBOActive)
{
LL_ERRS() << "Wrong vertex buffer bound." << LL_ENDL;
David Parks
committed
}
LL_ERRS() << "Invalid draw mode: " << mode << LL_ENDL;
//LL_RECORD_BLOCK_TIME(FTM_GL_DRAW_ARRAYS);
stop_glerror();
LLGLSLShader::startProfile();
stop_glerror();
glDrawArrays(sGLMode[mode], first, count);
stop_glerror();
LLGLSLShader::stopProfile(count, mode);
David Parks
committed
placeFence();
Xiaohong Bao
committed
void LLVertexBuffer::initClass(bool use_vbo, bool no_vbo_mapping)
Leslie Linden
committed
sEnableVBOs = use_vbo && gGLManager.mHasVertexBufferObject;
sDisableVBOMapping = sEnableVBOs && no_vbo_mapping;
}
//static
// Unbind any VAO, VBO, and IBO and clear the cached binding state.
void LLVertexBuffer::unbind()
{
	if (sGLRenderArray)
	{
		glBindVertexArray(0);
		sGLRenderArray = 0;
		sGLRenderIndices = 0;
		//index binding is part of VAO state, so it is released with the VAO
		sIBOActive = false;
	}

	//NOTE(review): the sVBOActive/sIBOActive resets below appear to have been
	//lost in this copy of the file -- restored; confirm against upstream.
	if (sVBOActive)
	{
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
		sVBOActive = false;
	}
	if (sIBOActive)
	{
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
		sIBOActive = false;
	}

	sGLRenderBuffer = 0;
	sGLRenderIndices = 0;
}
//static
// Drain every static buffer pool (deletes pooled GL buffers and frees their
// client-side shadow copies). Called at shutdown; pools are safe to reuse after.
void LLVertexBuffer::cleanupClass()
{
sStreamIBOPool.cleanup();
sDynamicIBOPool.cleanup();
sStreamVBOPool.cleanup();
sDynamicVBOPool.cleanup();
sDynamicCopyVBOPool.cleanup();
}
//----------------------------------------------------------------------------
Leslie Linden
committed
//static
// Normalize a requested GL usage hint against global capability/preference
// flags. Returns 0 for "no VBO" (client arrays), otherwise one of
// GL_STREAM_DRAW_ARB, GL_DYNAMIC_DRAW_ARB, or GL_DYNAMIC_COPY_ARB.
S32 LLVertexBuffer::determineUsage(S32 usage)
{
	S32 ret_usage = usage;

	if (!sEnableVBOs)
	{
		ret_usage = 0;
	}

	if (ret_usage == GL_STREAM_DRAW_ARB && !sUseStreamDraw)
	{
		ret_usage = 0;
	}

	if (ret_usage == GL_DYNAMIC_DRAW_ARB && sPreferStreamDraw)
	{
		ret_usage = GL_STREAM_DRAW_ARB;
	}

	if (ret_usage == 0 && LLRender::sGLCoreProfile)
	{ //MUST use VBOs for all rendering
		ret_usage = GL_STREAM_DRAW_ARB;
	}

	if (ret_usage && ret_usage != GL_STREAM_DRAW_ARB)
	{ //only stream_draw and dynamic_draw are supported when using VBOs, dynamic draw is the default
		if (ret_usage != GL_DYNAMIC_COPY_ARB)
		{
			if (sDisableVBOMapping)
			{ //always use stream draw if VBO mapping is disabled
				ret_usage = GL_STREAM_DRAW_ARB;
			}
			else
			{
				ret_usage = GL_DYNAMIC_DRAW_ARB;
			}
		}
	}

	return ret_usage;
}
LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage)
: LLTrace::MemTrackable<LLVertexBuffer>("LLVertexBuffer"),
	LLRefCount(),
	mNumVerts(0),
	mNumIndices(0),
	mAlignedOffset(0),
	mAlignedIndexOffset(0),
	mSize(0),
	mIndicesSize(0),
	mTypeMask(typemask),
	mUsage(LLVertexBuffer::determineUsage(usage)),
	mGLBuffer(0),
	mGLIndices(0),
	mGLArray(0),
	mMappedData(NULL),
	mMappedIndexData(NULL),
	mMappedDataUsingVBOs(false),
	mMappedIndexDataUsingVBOs(false),
	mVertexLocked(false),
	mIndexLocked(false),
	mFinal(false),
	mEmpty(true),
	mMappable(false),
	mFence(NULL)
{
	//only dynamic-draw buffers are mapped directly; everything else is
	//updated through client-memory copies
	mMappable = (mUsage == GL_DYNAMIC_DRAW_ARB && !sDisableVBOMapping);

	//zero out offsets
	for (U32 i = 0; i < TYPE_MAX; i++)
	{
		mOffsets[i] = 0;
	}

	sCount++;
}
//static
S32 LLVertexBuffer::calcOffsets(const U32& typemask, S32* offsets, S32 num_vertices)
{
for (S32 i=0; i<TYPE_TEXTURE_INDEX; i++)
{
U32 mask = 1<<i;
if (typemask & mask)
{
if (offsets && LLVertexBuffer::sTypeSize[i])
{
offset += LLVertexBuffer::sTypeSize[i]*num_vertices;
}
offsets[TYPE_TEXTURE_INDEX] = offsets[TYPE_VERTEX] + 12;
//static
S32 LLVertexBuffer::calcVertexSize(const U32& typemask)
{
S32 size = 0;
for (S32 i = 0; i < TYPE_TEXTURE_INDEX; i++)
{
U32 mask = 1<<i;
if (typemask & mask)
{
size += LLVertexBuffer::sTypeSize[i];
return size;
{
return mSize;
}
// protected, use unref()
//virtual
LLVertexBuffer::~LLVertexBuffer()
{
	destroyGLBuffer();
	destroyGLIndices();

	if (mGLArray)
	{
		releaseVAOName(mGLArray);
	}

	if (mFence)
	{
		// Sanity check. We have weird crashes in this destructor (on delete). Yet mFence is disabled.
		// TODO: mFence was added in scope of SH-2038, but was never enabled, consider removing mFence.
		LL_ERRS() << "LLVertexBuffer destruction failed" << LL_ENDL;
		delete mFence;
		mFence = NULL;
	}

	sVertexCount -= mNumVerts;
	sIndexCount -= mNumIndices;

	//destroyGLBuffer/destroyGLIndices must have released all client memory
	if (mMappedData)
	{
		LL_ERRS() << "Failed to clear vertex buffer's vertices" << LL_ENDL;
	}
	if (mMappedIndexData)
	{
		LL_ERRS() << "Failed to clear vertex buffer's indices" << LL_ENDL;
	}
}
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
// Intentionally a no-op: the GL sync-fence path was written but never enabled
// (see the SH-2038 note in the destructor). Kept for reference.
void LLVertexBuffer::placeFence() const
{
/*if (!mFence && useVBOs())
{
if (gGLManager.mHasSync)
{
mFence = new LLGLSyncFence();
}
}
if (mFence)
{
mFence->placeFence();
}*/
}
// Intentionally a no-op: counterpart to placeFence(); the fence machinery was
// never enabled.
void LLVertexBuffer::waitFence() const
{
/*if (mFence)
{
mFence->wait();
}*/
}
//----------------------------------------------------------------------------
void LLVertexBuffer::genBuffer(U32 size)
{
	//round the request up to a pool block size, then draw from the pool
	//matching this buffer's usage hint
	disclaimMem(mSize);
	mSize = vbo_block_size(size);

	LLVBOPool* pool = &sDynamicCopyVBOPool;
	if (mUsage == GL_STREAM_DRAW_ARB)
	{
		pool = &sStreamVBOPool;
	}
	else if (mUsage == GL_DYNAMIC_DRAW_ARB)
	{
		pool = &sDynamicVBOPool;
	}

	mMappedData = pool->allocate(mGLBuffer, mSize);

	sGLCount++;
}
void LLVertexBuffer::genIndices(U32 size)
{
	//round up to a pool block size and allocate from the matching index pool
	mIndicesSize = vbo_block_size(size);

	LLVBOPool& pool = (mUsage == GL_STREAM_DRAW_ARB) ? sStreamIBOPool
	                                                 : sDynamicIBOPool;
	mMappedIndexData = pool.allocate(mGLIndices, mIndicesSize);

	sGLCount++;
}
void LLVertexBuffer::releaseBuffer()
{
if (mUsage == GL_STREAM_DRAW_ARB)
{
sStreamVBOPool.release(mGLBuffer, mMappedData, mSize);
}
else
{
sDynamicVBOPool.release(mGLBuffer, mMappedData, mSize);
}
mGLBuffer = 0;
mMappedData = NULL;
sGLCount--;
}
void LLVertexBuffer::releaseIndices()
{
	//return the index buffer to the pool it came from
	if (mUsage == GL_STREAM_DRAW_ARB)
	{
		sStreamIBOPool.release(mGLIndices, mMappedIndexData, mIndicesSize);
	}
	else //missing `else` restored: both branches previously executed unconditionally
	{
		sDynamicIBOPool.release(mGLIndices, mMappedIndexData, mIndicesSize);
	}

	mGLIndices = 0;
	mMappedIndexData = NULL;

	sGLCount--;
}
// (Re)create backing storage for the vertex data. Returns false only when the
// client-memory fallback allocation fails.
bool LLVertexBuffer::createGLBuffer(U32 size)
{
	if (mGLBuffer)
	{
		destroyGLBuffer();
	}

	if (size == 0)
	{
		return true;
	}

	bool sucsess = true;

	mEmpty = true;

	mMappedDataUsingVBOs = useVBOs();

	if (mMappedDataUsingVBOs)
	{
		genBuffer(size);
	}
	else
	{
		//client-array fallback: fake GL name, real heap allocation
		static int gl_buffer_idx = 0;
		mGLBuffer = ++gl_buffer_idx;
		mMappedData = (U8*)ll_aligned_malloc_16(size);
		disclaimMem(mSize);
		mSize = size;

		//NOTE(review): check kept in the non-VBO branch -- in the VBO path
		//mMappedData is legitimately NULL for GL_DYNAMIC_COPY buffers.
		if (!mMappedData)
		{
			sucsess = false;
		}
	}

	return sucsess;
}
// (Re)create backing storage for the index data. Returns false only when the
// client-memory fallback allocation fails.
bool LLVertexBuffer::createGLIndices(U32 size)
{
	if (mGLIndices)
	{
		destroyGLIndices();
	}

	if (size == 0)
	{
		return true;
	}

	bool sucsess = true;

	mEmpty = true;

	//pad by 16 bytes for aligned copies
	size += 16;

	mMappedIndexDataUsingVBOs = useVBOs();

	if (mMappedIndexDataUsingVBOs)
	{
		//pad by another 16 bytes for VBO pointer adjustment
		size += 16;
		genIndices(size);
	}
	else
	{
		mMappedIndexData = (U8*)ll_aligned_malloc_16(size);
		static int gl_buffer_idx = 0;
		mGLIndices = ++gl_buffer_idx;
		mIndicesSize = size;

		if (!mMappedIndexData)
		{
			sucsess = false;
		}
	}

	return sucsess;
}
void LLVertexBuffer::destroyGLBuffer()
{
AndreyL ProductEngine
committed
if (mGLBuffer || mMappedData)
Leslie Linden
committed
if (mMappedDataUsingVBOs)
releaseBuffer();
}
else
{
ll_aligned_free_16((void*)mMappedData);
mMappedData = NULL;
Leslie Linden
committed
mEmpty = true;
}
}
mGLBuffer = 0;
//unbind();
}
void LLVertexBuffer::destroyGLIndices()
{
AndreyL ProductEngine
committed
if (mGLIndices || mMappedIndexData)
Leslie Linden
committed
if (mMappedIndexDataUsingVBOs)
Loren Shih
committed
releaseIndices();
}
else
{
ll_aligned_free_16((void*)mMappedIndexData);
mMappedIndexData = NULL;
Leslie Linden
committed
mEmpty = true;
}
}
mGLIndices = 0;
//unbind();
// Resize the vertex store for nverts vertices. Reallocates only when the
// buffer must grow, or when it would shrink by more than half.
bool LLVertexBuffer::updateNumVerts(S32 nverts)
{
	llassert(nverts >= 0);

	bool sucsess = true;

	if (nverts > 65536)
	{ //16-bit indices cap the addressable vertex count
		LL_WARNS() << "Vertex buffer overflow!" << LL_ENDL;
		nverts = 65536;
	}

	U32 needed_size = calcOffsets(mTypeMask, mOffsets, nverts);

	if (needed_size > mSize || needed_size <= mSize/2)
	{
		sucsess &= createGLBuffer(needed_size);
	}

	sVertexCount -= mNumVerts;
	mNumVerts = nverts;
	sVertexCount += mNumVerts;

	return sucsess;
}
// Resize the index store for nindices 16-bit indices. Same grow/shrink-by-half
// reallocation policy as updateNumVerts().
bool LLVertexBuffer::updateNumIndices(S32 nindices)
{
	llassert(nindices >= 0);

	bool sucsess = true;

	U32 needed_size = sizeof(U16) * nindices;

	if (needed_size > mIndicesSize || needed_size <= mIndicesSize/2)
	{
		sucsess &= createGLIndices(needed_size);
	}

	sIndexCount -= mNumIndices;
	mNumIndices = nindices;
	sIndexCount += mNumIndices;

	return sucsess;
}
// Allocate (or resize) vertex and index storage. When create is true and the
// buffer is non-empty, also sets up a VAO where supported/enabled.
bool LLVertexBuffer::allocateBuffer(S32 nverts, S32 nindices, bool create)
{
	stop_glerror();

	if (nverts < 0 || nindices < 0 ||
		nverts > 65536)
	{
		LL_ERRS() << "Bad vertex buffer allocation: " << nverts << " : " << nindices << LL_ENDL;
	}

	bool sucsess = true;

	sucsess &= updateNumVerts(nverts);
	sucsess &= updateNumIndices(nindices);

	if (create && (nverts || nindices))
	{
		//actually allocate space for the vertex buffer if using VBO mapping
		flush(); //unmap

		if (gGLManager.mHasVertexArrayObject && useVBOs() && sUseVAO)
		{
			mGLArray = getVAOName();
			setupVertexArray();
		}
	}

	return sucsess;
}
static LLTrace::BlockTimerStatHandle FTM_SETUP_VERTEX_ARRAY("Setup VAO");
David Parks
committed
void LLVertexBuffer::setupVertexArray()
{
if (!mGLArray)
{
return;
}
LL_RECORD_BLOCK_TIME(FTM_SETUP_VERTEX_ARRAY);
#if GL_ARB_vertex_array_object
glBindVertexArray(mGLArray);
#endif
sGLRenderArray = mGLArray;
David Parks
committed
David Parks
committed
{
3, //TYPE_VERTEX,
3, //TYPE_NORMAL,
2, //TYPE_TEXCOORD0,
2, //TYPE_TEXCOORD1,
2, //TYPE_TEXCOORD2,
2, //TYPE_TEXCOORD3,
4, //TYPE_COLOR,
David Parks
committed
4, //TYPE_EMISSIVE,
David Parks
committed
4, //TYPE_TANGENT,
David Parks
committed
1, //TYPE_WEIGHT,
4, //TYPE_WEIGHT4,
4, //TYPE_CLOTHWEIGHT,
David Parks
committed
1, //TYPE_TEXTURE_INDEX
David Parks
committed
};
David Parks
committed
{
GL_FLOAT, //TYPE_VERTEX,
GL_FLOAT, //TYPE_NORMAL,
GL_FLOAT, //TYPE_TEXCOORD0,
GL_FLOAT, //TYPE_TEXCOORD1,
GL_FLOAT, //TYPE_TEXCOORD2,
GL_FLOAT, //TYPE_TEXCOORD3,
GL_UNSIGNED_BYTE, //TYPE_COLOR,
GL_UNSIGNED_BYTE, //TYPE_EMISSIVE,
David Parks
committed
GL_FLOAT, //TYPE_TANGENT,
David Parks
committed
GL_FLOAT, //TYPE_WEIGHT,
GL_FLOAT, //TYPE_WEIGHT4,
GL_FLOAT, //TYPE_CLOTHWEIGHT,
David Parks
committed
GL_UNSIGNED_INT, //TYPE_TEXTURE_INDEX
David Parks
committed
};
David Parks
committed
{
false, //TYPE_VERTEX,
false, //TYPE_NORMAL,
false, //TYPE_TEXCOORD0,
false, //TYPE_TEXCOORD1,
false, //TYPE_TEXCOORD2,
false, //TYPE_TEXCOORD3,
false, //TYPE_COLOR,
false, //TYPE_EMISSIVE,
David Parks
committed
false, //TYPE_TANGENT,
David Parks
committed
false, //TYPE_WEIGHT,
false, //TYPE_WEIGHT4,
false, //TYPE_CLOTHWEIGHT,
true, //TYPE_TEXTURE_INDEX
David Parks
committed
};
David Parks
committed
{
GL_FALSE, //TYPE_VERTEX,
GL_FALSE, //TYPE_NORMAL,
GL_FALSE, //TYPE_TEXCOORD0,
GL_FALSE, //TYPE_TEXCOORD1,
GL_FALSE, //TYPE_TEXCOORD2,
GL_FALSE, //TYPE_TEXCOORD3,
GL_TRUE, //TYPE_COLOR,
GL_TRUE, //TYPE_EMISSIVE,
David Parks
committed
GL_FALSE, //TYPE_TANGENT,
David Parks
committed
GL_FALSE, //TYPE_WEIGHT,
GL_FALSE, //TYPE_WEIGHT4,
GL_FALSE, //TYPE_CLOTHWEIGHT,
GL_FALSE, //TYPE_TEXTURE_INDEX
David Parks
committed
};
bindGLBuffer(true);
bindGLIndices(true);
for (U32 i = 0; i < TYPE_MAX; ++i)
{
if (mTypeMask & (1 << i))
{
glEnableVertexAttribArrayARB(i);
David Parks
committed
David Parks
committed
{
David Parks
committed
//glVertexattribIPointer requires GLSL 1.30 or later
if (gGLManager.mGLSLVersionMajor > 1 || gGLManager.mGLSLVersionMinor >= 30)
{
// nat 2018-10-24: VS 2017 also notices the issue
// described below, and warns even with reinterpret_cast.
// Cast via intptr_t to make it painfully obvious to the
// compiler that we're doing this intentionally.
glVertexAttribIPointer(i, attrib_size[i], attrib_type[i], sTypeSize[i],
reinterpret_cast<const GLvoid*>(intptr_t(mOffsets[i])));
David Parks
committed
}
David Parks
committed
}
else
{
// nat 2016-12-16: With 64-bit clang compile, the compiler
// produces an error if we simply cast mOffsets[i] -- an S32
// -- to (GLvoid *), the type of the parameter. It correctly
// points out that there's no way an S32 could fit a real
// pointer value. Ruslan asserts that in this case the last
// param is interpreted as an array data offset within the VBO
// rather than as an actual pointer, so it's okay.
glVertexAttribPointerARB(i, attrib_size[i], attrib_type[i],
attrib_normalized[i], sTypeSize[i],
reinterpret_cast<GLvoid*>(intptr_t(mOffsets[i])));
David Parks
committed
}
David Parks
committed
}
else
{
glDisableVertexAttribArrayARB(i);
}
}
//draw a dummy triangle to set index array pointer
//glDrawElements(GL_TRIANGLES, 0, GL_UNSIGNED_SHORT, NULL);
David Parks
committed
unbind();
David Parks
committed
}
// Resize this buffer to hold newnverts vertices and newnindices indices.
// Returns false if either count update fails. For VBO-backed buffers the
// mapped data is flushed first; if a VAO is in use, attribute offsets moved
// with the resize, so the vertex array setup is redone.
bool LLVertexBuffer::resizeBuffer(S32 newnverts, S32 newnindices)
{
	bool success = true;

	success &= updateNumVerts(newnverts);
	success &= updateNumIndices(newnindices);

	if (useVBOs())
	{
		flush(); //unmap

		if (mGLArray)
		{ //if size changed, offsets changed
			setupVertexArray();
		}
	}

	return success;
}
bool LLVertexBuffer::useVBOs() const
//it's generally ineffective to use VBO for things that are streaming on apple
andreykproductengine
committed
return (mUsage != 0);
}
//----------------------------------------------------------------------------
// Try to grow an existing mapped region so that it also covers
// [index, index+count). Returns false (region untouched) when the requested
// span does not touch or overlap the region; otherwise the region is widened
// in place to the union of the two spans and true is returned.
bool expand_region(LLVertexBuffer::MappedRegion& region, S32 index, S32 count)
{
	const S32 req_end = index + count;
	const S32 cur_end = region.mIndex + region.mCount;

	// A gap on either side means the spans are disjoint -- do not merge.
	if (req_end < region.mIndex || index > cur_end)
	{
		return false;
	}

	const S32 merged_start = llmin(index, region.mIndex);
	const S32 merged_end = llmax(req_end, cur_end);

	region.mIndex = merged_start;
	region.mCount = merged_end - merged_start;

	return true;
}
static LLTrace::BlockTimerStatHandle FTM_VBO_MAP_BUFFER_RANGE("VBO Map Range");
static LLTrace::BlockTimerStatHandle FTM_VBO_MAP_BUFFER("VBO Map");
// Map for data access
// Map (a range of) the vertex data of this buffer for CPU writes.
//
// @param type       attribute type (TYPE_* index) whose data is wanted
// @param index      first vertex to map
// @param count      number of vertices to map, or -1 for "to the end"
// @param map_range  when true (and the driver supports ranged mapping), only
//                   the requested range is mapped/invalidated
// @return pointer to the start of the requested attribute data
//
// NOTE(review): this body is reconstructed from a corrupted source chunk
// (VCS-blame residue had been interleaved with the code); the control flow
// follows the intact fragments and the matching index-buffer routine below.
volatile U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 index, S32 count, bool map_range)
{
	bindGLBuffer(true);
	if (mFinal)
	{
		LL_ERRS() << "LLVertexBuffer::mapVertexBuffer() called on a finalized buffer." << LL_ENDL;
	}
	if (!useVBOs() && !mMappedData && !mMappedIndexData)
	{
		LL_ERRS() << "LLVertexBuffer::mapVertexBuffer() called on unallocated buffer." << LL_ENDL;
	}

	if (useVBOs())
	{
		if (!mMappable || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
		{
			if (count == -1)
			{
				count = mNumVerts-index;
			}

			bool mapped = false;
			//see if range is already mapped
			for (U32 i = 0; i < mMappedVertexRegions.size(); ++i)
			{
				MappedRegion& region = mMappedVertexRegions[i];
				if (region.mType == type)
				{
					if (expand_region(region, index, count))
					{
						mapped = true;
						break;
					}
				}
			}

			if (!mapped)
			{
				//not already mapped, map new region
				MappedRegion region(type, mMappable && map_range ? -1 : index, count);
				mMappedVertexRegions.push_back(region);
			}
		}

		if (mVertexLocked && map_range)
		{
			LL_ERRS() << "Attempted to map a specific range of a buffer that was already mapped." << LL_ENDL;
		}

		if (!mVertexLocked)
		{
			mVertexLocked = true;
			sMappedCount++;
			stop_glerror();

			if (!mMappable)
			{
				// buffer is not CPU-mappable; writes go through the shadow
				// copy in mMappedData and are uploaded on unmap
				map_range = false;
			}
			else
			{
				volatile U8* src = NULL;
				waitFence();
				if (gGLManager.mHasMapBufferRange)
				{
					if (map_range)
					{
#ifdef GL_ARB_map_buffer_range
						LL_RECORD_BLOCK_TIME(FTM_VBO_MAP_BUFFER_RANGE);
						S32 offset = mOffsets[type] + sTypeSize[type]*index;
						// round length up to a 16-byte multiple for aligned writes
						S32 length = (sTypeSize[type]*count+0xF) & ~0xF;
						src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, offset, length,
							GL_MAP_WRITE_BIT |
							GL_MAP_FLUSH_EXPLICIT_BIT |
							GL_MAP_INVALIDATE_RANGE_BIT);
#endif
					}
					else
					{
#ifdef GL_ARB_map_buffer_range
						if (gDebugGL)
						{
							GLint size = 0;
							glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size);
							if (size < mSize)
							{
								LL_ERRS() << "Invalid buffer size." << LL_ENDL;
							}
						}

						LL_RECORD_BLOCK_TIME(FTM_VBO_MAP_BUFFER);
						src = (U8*) glMapBufferRange(GL_ARRAY_BUFFER_ARB, 0, mSize,
							GL_MAP_WRITE_BIT |
							GL_MAP_FLUSH_EXPLICIT_BIT);
#endif
					}
				}
				else if (gGLManager.mHasFlushBufferRange)
				{
					if (map_range)
					{
#ifndef LL_MESA_HEADLESS
						// Apple path: disable serialized modify / flush-on-unmap,
						// ranges are flushed explicitly in unmapBuffer()
						glBufferParameteriAPPLE(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SERIALIZED_MODIFY_APPLE, GL_FALSE);
						glBufferParameteriAPPLE(GL_ARRAY_BUFFER_ARB, GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE);
#endif
						src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
					}
					else
					{
						src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
					}
				}
				else
				{
					// no ranged mapping support at all -- map the whole buffer
					map_range = false;
					src = (U8*) glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
				}

				mMappedData = LL_NEXT_ALIGNED_ADDRESS<volatile U8>(src);
				mAlignedOffset = mMappedData - src;

				stop_glerror();
			}

			if (!mMappedData)
			{
				log_glerror();

				//check the availability of memory
				LLMemory::logMemoryInfo(true);

				if (mMappable)
				{
					//--------------------
					//print out more debug info before crash
					LL_INFOS() << "vertex buffer size: (num verts : num indices) = " << getNumVerts() << " : " << getNumIndices() << LL_ENDL;
					GLint size;
					glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size);
					LL_INFOS() << "GL_ARRAY_BUFFER_ARB size is " << size << LL_ENDL;
					//--------------------

					GLint buff;
					glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
					if ((GLuint)buff != mGLBuffer)
					{
						LL_ERRS() << "Invalid GL vertex buffer bound: " << buff << LL_ENDL;
					}

					LL_ERRS() << "glMapBuffer returned NULL (no vertex data)" << LL_ENDL;
				}
				else
				{
					LL_ERRS() << "memory allocation for vertex data failed." << LL_ENDL;
				}
			}
		}
		else
		{
			// already locked by a full map -- pointer math below handles it
			map_range = false;
		}
	}
	else
	{
		// client-memory buffer: mMappedData is always valid
		map_range = false;
	}

	if (map_range && gGLManager.mHasMapBufferRange && mMappable)
	{
		// ranged map already points at the requested data
		return mMappedData;
	}
	else
	{
		return mMappedData+mOffsets[type]+sTypeSize[type]*index;
	}
}
static LLTrace::BlockTimerStatHandle FTM_VBO_MAP_INDEX_RANGE("IBO Map Range");
static LLTrace::BlockTimerStatHandle FTM_VBO_MAP_INDEX("IBO Map");
// Map (a range of) the index data of this buffer for CPU writes.
//
// @param index      first index element to map
// @param count      number of U16 indices to map, or -1 for "to the end"
// @param map_range  when true (and supported), only the requested range is
//                   mapped/invalidated
// @return pointer to the start of the requested index data
//
// NOTE(review): body reconstructed from a corrupted source chunk; mirrors
// mapVertexBuffer() with GL_ELEMENT_ARRAY_BUFFER as the target.
volatile U8* LLVertexBuffer::mapIndexBuffer(S32 index, S32 count, bool map_range)
{
	bindGLIndices(true);
	if (mFinal)
	{
		LL_ERRS() << "LLVertexBuffer::mapIndexBuffer() called on a finalized buffer." << LL_ENDL;
	}
	if (!useVBOs() && !mMappedData && !mMappedIndexData)
	{
		LL_ERRS() << "LLVertexBuffer::mapIndexBuffer() called on unallocated buffer." << LL_ENDL;
	}

	if (useVBOs())
	{
		if (!mMappable || gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
		{
			if (count == -1)
			{
				count = mNumIndices-index;
			}

			bool mapped = false;
			//see if range is already mapped
			for (U32 i = 0; i < mMappedIndexRegions.size(); ++i)
			{
				MappedRegion& region = mMappedIndexRegions[i];
				if (expand_region(region, index, count))
				{
					mapped = true;
					break;
				}
			}

			if (!mapped)
			{
				//not already mapped, map new region
				MappedRegion region(TYPE_INDEX, mMappable && map_range ? -1 : index, count);
				mMappedIndexRegions.push_back(region);
			}
		}

		if (mIndexLocked && map_range)
		{
			LL_ERRS() << "Attempted to map a specific range of a buffer that was already mapped." << LL_ENDL;
		}

		if (!mIndexLocked)
		{
			mIndexLocked = true;
			sMappedCount++;
			stop_glerror();

			if (gDebugGL && useVBOs())
			{
				GLint elem = 0;
				glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &elem);
				if (elem != mGLIndices)
				{
					LL_ERRS() << "Wrong index buffer bound!" << LL_ENDL;
				}
			}

			if (!mMappable)
			{
				// writes go through the shadow copy; uploaded on unmap
				map_range = false;
			}
			else
			{
				volatile U8* src = NULL;
				waitFence();
				if (gGLManager.mHasMapBufferRange)
				{
					if (map_range)
					{
#ifdef GL_ARB_map_buffer_range
						LL_RECORD_BLOCK_TIME(FTM_VBO_MAP_INDEX_RANGE);
						S32 offset = sizeof(U16)*index;
						S32 length = sizeof(U16)*count;
						src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length,
							GL_MAP_WRITE_BIT |
							GL_MAP_FLUSH_EXPLICIT_BIT |
							GL_MAP_INVALIDATE_RANGE_BIT);
#endif
					}
					else
					{
#ifdef GL_ARB_map_buffer_range
						LL_RECORD_BLOCK_TIME(FTM_VBO_MAP_INDEX);
						src = (U8*) glMapBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, sizeof(U16)*mNumIndices,
							GL_MAP_WRITE_BIT |
							GL_MAP_FLUSH_EXPLICIT_BIT);
#endif
					}
				}
				else if (gGLManager.mHasFlushBufferRange)
				{
					if (map_range)
					{
#ifndef LL_MESA_HEADLESS
						glBufferParameteriAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_BUFFER_SERIALIZED_MODIFY_APPLE, GL_FALSE);
						glBufferParameteriAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE);
#endif
						src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
					}
					else
					{
						src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
					}
				}
				else
				{
					LL_RECORD_BLOCK_TIME(FTM_VBO_MAP_INDEX);
					map_range = false;
					src = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
				}

				mMappedIndexData = src; //LL_NEXT_ALIGNED_ADDRESS<U8>(src);
				mAlignedIndexOffset = mMappedIndexData - src;
				stop_glerror();
			}
		}
		else
		{
			// already locked by a full map
			map_range = false;
		}

		if (!mMappedIndexData)
		{
			log_glerror();
			LLMemory::logMemoryInfo(true);

			if (mMappable)
			{
				GLint buff;
				glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
				if ((GLuint)buff != mGLIndices)
				{
					LL_ERRS() << "Invalid GL index buffer bound: " << buff << LL_ENDL;
				}

				LL_ERRS() << "glMapBuffer returned NULL (no index data)" << LL_ENDL;
			}
			else
			{
				LL_ERRS() << "memory allocation for Index data failed. " << LL_ENDL;
			}
		}
	}
	else
	{
		// client-memory buffer: mMappedIndexData is always valid
		map_range = false;
	}

	if (map_range && gGLManager.mHasMapBufferRange && mMappable)
	{
		return mMappedIndexData;
	}
	else
	{
		return mMappedIndexData + sizeof(U16)*index;
	}
}
static LLTrace::BlockTimerStatHandle FTM_VBO_UNMAP("VBO Unmap");
static LLTrace::BlockTimerStatHandle FTM_VBO_FLUSH_RANGE("Flush VBO Range");
static LLTrace::BlockTimerStatHandle FTM_IBO_UNMAP("IBO Unmap");
static LLTrace::BlockTimerStatHandle FTM_IBO_FLUSH_RANGE("Flush IBO Range");
David Parks
committed
void LLVertexBuffer::unmapBuffer()
David Parks
committed
if (!useVBOs())
Xiaohong Bao
committed
{
return; //nothing to unmap
Xiaohong Bao
committed
}
bool updated_all = false;
David Parks
committed
if (mMappedData && mVertexLocked)
LL_RECORD_BLOCK_TIME(FTM_VBO_UNMAP);
David Parks
committed
bindGLBuffer(true);
updated_all = mIndexLocked; //both vertex and index buffers done updating
Xiaohong Bao
committed
Xiaohong Bao
committed
{
David Parks
committed
if (!mMappedVertexRegions.empty())
{
stop_glerror();
for (U32 i = 0; i < mMappedVertexRegions.size(); ++i)
{
const MappedRegion& region = mMappedVertexRegions[i];
S32 offset = region.mIndex >= 0 ? mOffsets[region.mType]+sTypeSize[region.mType]*region.mIndex : 0;
S32 length = sTypeSize[region.mType]*region.mCount;
if (mSize >= length + offset)
{
glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, offset, length, (U8*)mMappedData + offset);
}
else
{
GLint size = 0;
glGetBufferParameterivARB(GL_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size);
LL_WARNS() << "Attempted to map regions to a buffer that is too small, "
<< "mapped size: " << mSize
<< ", gl buffer size: " << size
<< ", length: " << length
<< ", offset: " << offset
<< LL_ENDL;
}
David Parks
committed
stop_glerror();
}
mMappedVertexRegions.clear();
}
else
{
stop_glerror();
glBufferDataARB(GL_ARRAY_BUFFER_ARB, getSize(), nullptr, mUsage); // <alchemy/>
glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), (U8*) mMappedData);
David Parks
committed
stop_glerror();
}
Xiaohong Bao
committed
}
else
{
if (gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (!mMappedVertexRegions.empty())
{
stop_glerror();
for (U32 i = 0; i < mMappedVertexRegions.size(); ++i)
{
const MappedRegion& region = mMappedVertexRegions[i];
S32 offset = region.mIndex >= 0 ? mOffsets[region.mType]+sTypeSize[region.mType]*region.mIndex : 0;
S32 length = sTypeSize[region.mType]*region.mCount;
if (gGLManager.mHasMapBufferRange)
{
LL_RECORD_BLOCK_TIME(FTM_VBO_FLUSH_RANGE);
#ifdef GL_ARB_map_buffer_range
glFlushMappedBufferRange(GL_ARRAY_BUFFER_ARB, offset, length);
#endif
}
else if (gGLManager.mHasFlushBufferRange)
{
Nyx Linden
committed
#ifndef LL_MESA_HEADLESS
glFlushMappedBufferRangeAPPLE(GL_ARRAY_BUFFER_ARB, offset, length);
Nyx Linden
committed
#endif
stop_glerror();
}
mMappedVertexRegions.clear();
}
}
Xiaohong Bao
committed
stop_glerror();
glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
stop_glerror();
Xiaohong Bao
committed
mMappedData = NULL;
}
Leslie Linden
committed
mVertexLocked = false;
Xiaohong Bao
committed
sMappedCount--;
}
David Parks
committed
if (mMappedIndexData && mIndexLocked)
Xiaohong Bao
committed
{
LL_RECORD_BLOCK_TIME(FTM_IBO_UNMAP);
David Parks
committed
bindGLIndices();
Xiaohong Bao
committed
{
David Parks
committed
if (!mMappedIndexRegions.empty())
{
for (U32 i = 0; i < mMappedIndexRegions.size(); ++i)
{
const MappedRegion& region = mMappedIndexRegions[i];
S32 offset = region.mIndex >= 0 ? sizeof(U16)*region.mIndex : 0;
S32 length = sizeof(U16)*region.mCount;
if (mIndicesSize >= length + offset)
{
glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length, (U8*) mMappedIndexData+offset);
}
else
{
GLint size = 0;
glGetBufferParameterivARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_BUFFER_SIZE_ARB, &size);
LL_WARNS() << "Attempted to map regions to a buffer that is too small, "
<< "mapped size: " << mIndicesSize
<< ", gl buffer size: " << size
<< ", length: " << length
<< ", offset: " << offset
<< LL_ENDL;
}
David Parks
committed
stop_glerror();
}
mMappedIndexRegions.clear();
}
else
{
stop_glerror();
glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, getIndicesSize(), nullptr, mUsage); // <alchemy/>
glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), (U8*) mMappedIndexData);
David Parks
committed
stop_glerror();
}
Xiaohong Bao
committed
}
else
{
if (gGLManager.mHasMapBufferRange || gGLManager.mHasFlushBufferRange)
{
if (!mMappedIndexRegions.empty())
{
for (U32 i = 0; i < mMappedIndexRegions.size(); ++i)
{
const MappedRegion& region = mMappedIndexRegions[i];
S32 offset = region.mIndex >= 0 ? sizeof(U16)*region.mIndex : 0;
S32 length = sizeof(U16)*region.mCount;
if (gGLManager.mHasMapBufferRange)
{
LL_RECORD_BLOCK_TIME(FTM_IBO_FLUSH_RANGE);
#ifdef GL_ARB_map_buffer_range
glFlushMappedBufferRange(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length);
#endif
}
else if (gGLManager.mHasFlushBufferRange)
{
David Parks
committed
#ifdef GL_APPLE_flush_buffer_range
Nyx Linden
committed
#ifndef LL_MESA_HEADLESS
glFlushMappedBufferRangeAPPLE(GL_ELEMENT_ARRAY_BUFFER_ARB, offset, length);
Nyx Linden
committed
#endif
David Parks
committed
#endif
stop_glerror();
}
mMappedIndexRegions.clear();
}
}
Xiaohong Bao
committed
stop_glerror();
glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
stop_glerror();
mMappedIndexData = NULL;
Xiaohong Bao
committed
}
mIndexLocked = false;
Xiaohong Bao
committed
sMappedCount--;
}
if(updated_all)
{
Leslie Linden
committed
mEmpty = false;
}
}
//----------------------------------------------------------------------------
{
typedef LLStrider<T> strider_t;
static bool get(LLVertexBuffer& vbo,
strider_t& strider,
S32 index, S32 count, bool map_range)
S32 stride = LLVertexBuffer::sTypeSize[type];
Xiaohong Bao
committed
volatile U8* ptr = vbo.mapVertexBuffer(type, index, count, map_range);
Xiaohong Bao
committed
{
LL_WARNS() << "mapVertexBuffer failed!" << LL_ENDL;
Leslie Linden
committed
return false;
Xiaohong Bao
committed
}
strider = (T*)ptr;
strider.setStride(stride);
Leslie Linden
committed
return true;
LL_ERRS() << "VertexBufferStrider could not find valid vertex data." << LL_ENDL;
Leslie Linden
committed
return false;
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
template<class T>
struct VertexBufferStrider<T, LLVertexBuffer::TYPE_INDEX>
{
typedef LLStrider<T> strider_t;
static bool get(LLVertexBuffer& vbo,
strider_t& strider,
S32 index, S32 count, bool map_range)
{
volatile U8* ptr = vbo.mapIndexBuffer(index, count, map_range);
if (ptr == nullptr)
{
LL_WARNS() << "mapIndexBuffer failed!" << LL_ENDL;
return false;
}
strider = (T*) ptr;
strider.setStride(0);
return true;
}
};
bool LLVertexBuffer::getVertexStrider(LLStrider<LLVector3>& strider, S32 index, S32 count, bool map_range)
return VertexBufferStrider<LLVector3,TYPE_VERTEX>::get(*this, strider, index, count, map_range);
bool LLVertexBuffer::getVertexStrider(LLStrider<LLVector4a>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<LLVector4a,TYPE_VERTEX>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getIndexStrider(LLStrider<U16>& strider, S32 index, S32 count, bool map_range)
return VertexBufferStrider<U16,TYPE_INDEX>::get(*this, strider, index, count, map_range);
bool LLVertexBuffer::getTexCoord0Strider(LLStrider<LLVector2>& strider, S32 index, S32 count, bool map_range)
return VertexBufferStrider<LLVector2,TYPE_TEXCOORD0>::get(*this, strider, index, count, map_range);
bool LLVertexBuffer::getTexCoord1Strider(LLStrider<LLVector2>& strider, S32 index, S32 count, bool map_range)
return VertexBufferStrider<LLVector2,TYPE_TEXCOORD1>::get(*this, strider, index, count, map_range);
David Parks
committed
bool LLVertexBuffer::getTexCoord2Strider(LLStrider<LLVector2>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<LLVector2,TYPE_TEXCOORD2>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getNormalStrider(LLStrider<LLVector3>& strider, S32 index, S32 count, bool map_range)
return VertexBufferStrider<LLVector3,TYPE_NORMAL>::get(*this, strider, index, count, map_range);
David Parks
committed
bool LLVertexBuffer::getTangentStrider(LLStrider<LLVector3>& strider, S32 index, S32 count, bool map_range)
David Parks
committed
return VertexBufferStrider<LLVector3,TYPE_TANGENT>::get(*this, strider, index, count, map_range);
}
bool LLVertexBuffer::getTangentStrider(LLStrider<LLVector4a>& strider, S32 index, S32 count, bool map_range)
{
return VertexBufferStrider<LLVector4a,TYPE_TANGENT>::get(*this, strider, index, count, map_range);
bool LLVertexBuffer::getColorStrider(LLStrider<LLColor4U>& strider, S32 index, S32 count, bool map_range)
return VertexBufferStrider<LLColor4U,TYPE_COLOR>::get(*this, strider, index, count, map_range);
David Parks
committed
bool LLVertexBuffer::getEmissiveStrider(LLStrider<LLColor4U>& strider, S32 index, S32 count, bool map_range)
David Parks
committed
return VertexBufferStrider<LLColor4U,TYPE_EMISSIVE>::get(*this, strider, index, count, map_range);
bool LLVertexBuffer::getWeightStrider(LLStrider<F32>& strider, S32 index, S32 count, bool map_range)
return VertexBufferStrider<F32,TYPE_WEIGHT>::get(*this, strider, index, count, map_range);
bool LLVertexBuffer::getWeight4Strider(LLStrider<LLVector4>& strider, S32 index, S32 count, bool map_range)
return VertexBufferStrider<LLVector4,TYPE_WEIGHT4>::get(*this, strider, index, count, map_range);
bool LLVertexBuffer::getClothWeightStrider(LLStrider<LLVector4>& strider, S32 index, S32 count, bool map_range)
return VertexBufferStrider<LLVector4,TYPE_CLOTHWEIGHT>::get(*this, strider, index, count, map_range);
}
//----------------------------------------------------------------------------
static LLTrace::BlockTimerStatHandle FTM_BIND_GL_ARRAY("Bind Array");
David Parks
committed
bool LLVertexBuffer::bindGLArray()
{
if (mGLArray && sGLRenderArray != mGLArray)
{
//LL_RECORD_BLOCK_TIME(FTM_BIND_GL_ARRAY);
David Parks
committed
//really shouldn't be necessary, but some drivers don't properly restore the
//state of GL_ELEMENT_ARRAY_BUFFER_BINDING
bindGLIndices();
David Parks
committed
return true;
}
return false;
}
static LLTrace::BlockTimerStatHandle FTM_BIND_GL_BUFFER("Bind Buffer");
David Parks
committed
bool LLVertexBuffer::bindGLBuffer(bool force_bind)
{
bindGLArray();
bool ret = false;
if (useVBOs() && (force_bind || (mGLBuffer && (mGLBuffer != sGLRenderBuffer || !sVBOActive))))
{
David Parks
committed
glBindBufferARB(GL_ARRAY_BUFFER_ARB, mGLBuffer);
sGLRenderBuffer = mGLBuffer;
sBindCount++;
David Parks
committed
llassert(!mGLArray || sGLRenderArray == mGLArray);
David Parks
committed
ret = true;
}
return ret;
}
static LLTrace::BlockTimerStatHandle FTM_BIND_GL_INDICES("Bind Indices");
David Parks
committed
bool LLVertexBuffer::bindGLIndices(bool force_bind)
{
bindGLArray();
bool ret = false;
if (useVBOs() && (force_bind || (mGLIndices && (mGLIndices != sGLRenderIndices || !sIBOActive))))
{
//LL_RECORD_BLOCK_TIME(FTM_BIND_GL_INDICES);
David Parks
committed
/*if (sMapped)
{
LL_ERRS() << "VBO bound while another VBO mapped!" << LL_ENDL;
David Parks
committed
}*/
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, mGLIndices);
sGLRenderIndices = mGLIndices;
stop_glerror();
sBindCount++;
David Parks
committed
ret = true;
}
return ret;
}
// Push any pending mapped writes to the GL. Client-array buffers have
// nothing to flush, so this is a no-op for them.
void LLVertexBuffer::flush()
{
	if (!useVBOs())
	{
		return;
	}
	unmapBuffer();
}
David Parks
committed
// bind for transform feedback (quick 'n dirty)
// Binds the sub-range of this VBO holding `count` elements of attribute
// `type` starting at `index` to the given transform-feedback channel.
void LLVertexBuffer::bindForFeedback(U32 channel, U32 type, U32 index, U32 count)
{
#ifdef GL_TRANSFORM_FEEDBACK_BUFFER
	U32 offset = mOffsets[type] + sTypeSize[type]*index;
	U32 size= (sTypeSize[type]*count);
	glBindBufferRange(GL_TRANSFORM_FEEDBACK_BUFFER, channel, mGLBuffer, offset, size);
#endif
}
// Set for rendering
David Parks
committed
// Set for rendering: make this buffer the current vertex/index source and
// (re)establish attribute pointers when the active data mask or binding
// changed. In debug builds, verifies that the supplied data mask satisfies
// everything the currently bound shader consumes.
//
// NOTE(review): body reconstructed from a corrupted source chunk (stray
// line-number residue and eaten braces); control flow follows the intact
// fragments.
void LLVertexBuffer::setBuffer(U32 data_mask)
{
	flush();

	//set up pointers if the data mask is different ...
	bool setup = (sLastMask != data_mask);

	if (gDebugGL && data_mask != 0)
	{ //make sure data requirements are fulfilled
		LLGLSLShader* shader = LLGLSLShader::sCurBoundShaderPtr;
		if (shader)
		{
			U32 required_mask = 0;
			for (U32 i = 0; i < LLVertexBuffer::TYPE_TEXTURE_INDEX; ++i)
			{
				if (shader->getAttribLocation(i) > -1)
				{
					U32 required = 1 << i;
					if ((data_mask & required) == 0)
					{
						LL_WARNS() << "Missing attribute: " << LLShaderMgr::instance()->mReservedAttribs[i] << LL_ENDL;
					}
					required_mask |= required;
				}
			}

			if ((data_mask & required_mask) != required_mask)
			{
				U32 unsatisfied_mask = (required_mask & ~data_mask);

				for (U32 i = 0; i < TYPE_MAX; i++)
				{
					U32 unsatisfied_flag = unsatisfied_mask & (1 << i);
					switch (unsatisfied_flag)
					{
						case 0: break;
						case MAP_VERTEX: LL_INFOS() << "Missing vert pos" << LL_ENDL; break;
						case MAP_NORMAL: LL_INFOS() << "Missing normals" << LL_ENDL; break;
						case MAP_TEXCOORD0: LL_INFOS() << "Missing TC 0" << LL_ENDL; break;
						case MAP_TEXCOORD1: LL_INFOS() << "Missing TC 1" << LL_ENDL; break;
						case MAP_TEXCOORD2: LL_INFOS() << "Missing TC 2" << LL_ENDL; break;
						case MAP_TEXCOORD3: LL_INFOS() << "Missing TC 3" << LL_ENDL; break;
						case MAP_COLOR: LL_INFOS() << "Missing vert color" << LL_ENDL; break;
						case MAP_EMISSIVE: LL_INFOS() << "Missing emissive" << LL_ENDL; break;
						case MAP_TANGENT: LL_INFOS() << "Missing tangent" << LL_ENDL; break;
						case MAP_WEIGHT: LL_INFOS() << "Missing weight" << LL_ENDL; break;
						case MAP_WEIGHT4: LL_INFOS() << "Missing weightx4" << LL_ENDL; break;
						case MAP_CLOTHWEIGHT: LL_INFOS() << "Missing clothweight" << LL_ENDL; break;
						case MAP_TEXTURE_INDEX: LL_INFOS() << "Missing tex index" << LL_ENDL; break;
						default: LL_INFOS() << "Missing who effin knows: " << unsatisfied_flag << LL_ENDL;
					}
				}

				// TYPE_INDEX is beyond TYPE_MAX, so check for it individually
				if (unsatisfied_mask & (1 << TYPE_INDEX))
				{
					LL_INFOS() << "Missing indices" << LL_ENDL;
				}

				LL_ERRS() << "Shader consumption mismatches data provision." << LL_ENDL;
			}
		}
	}

	if (useVBOs())
	{
		if (mGLArray)
		{
			bindGLArray();
			setup = false; //do NOT perform pointer setup if using VAO
		}
		else
		{
			const bool bindBuffer = bindGLBuffer();
			const bool bindIndices = bindGLIndices();

			setup = setup || bindBuffer || bindIndices;
		}

		if (gDebugGL && !mGLArray)
		{
			GLint buff;
			glGetIntegerv(GL_ARRAY_BUFFER_BINDING_ARB, &buff);
			if ((GLuint)buff != mGLBuffer)
			{
				if (gDebugSession)
				{
					gFailLog << "Invalid GL vertex buffer bound: " << buff << std::endl;
				}
				else
				{
					LL_ERRS() << "Invalid GL vertex buffer bound: " << buff << LL_ENDL;
				}
			}

			if (mGLIndices)
			{
				glGetIntegerv(GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB, &buff);
				if ((GLuint)buff != mGLIndices)
				{
					if (gDebugSession)
					{
						gFailLog << "Invalid GL index buffer bound: " << buff << std::endl;
					}
					else
					{
						LL_ERRS() << "Invalid GL index buffer bound: " << buff << LL_ENDL;
					}
				}
			}
		}
	}
	else
	{
		// client-array path: make sure no VAO/VBO/IBO stays bound
		if (sGLRenderArray)
		{
			glBindVertexArray(0);
			sGLRenderArray = 0;
			sGLRenderIndices = 0;
			sIBOActive = false;
		}

		if (mGLBuffer)
		{
			if (sVBOActive)
			{
				glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
				sBindCount++;
				sVBOActive = false;
				setup = true; // ... or a VBO is deactivated
			}
			if (sGLRenderBuffer != mGLBuffer)
			{
				sGLRenderBuffer = mGLBuffer;
				setup = true; // ... or a client memory pointer changed
			}
		}
		if (mGLIndices)
		{
			if (sIBOActive)
			{
				glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
				sBindCount++;
				sIBOActive = false;
			}

			sGLRenderIndices = mGLIndices;
		}
	}

	if (!mGLArray)
	{
		setupClientArrays(data_mask);
	}

	if (mGLBuffer)
	{
		if (data_mask && setup)
		{
			setupVertexBuffer(data_mask); // subclass specific setup (virtual function)
			sSetCount++;
		}
	}
}
// virtual (default)
David Parks
committed
// virtual (default)
// Establish GL attribute pointers for every component selected by data_mask.
// When shaders are active (sNoFixedFunction) generic vertex attributes are
// used; otherwise the legacy fixed-function pointers are set. `base` is the
// VBO offset (for VBO buffers) or the client-memory pointer.
//
// NOTE(review): body reconstructed from a corrupted source chunk; the
// per-attribute calls follow the intact fragments.
void LLVertexBuffer::setupVertexBuffer(U32 data_mask)
{
	stop_glerror();
	volatile U8* base = useVBOs() ? (U8*) mAlignedOffset : mMappedData;

	if (gDebugGL && ((data_mask & mTypeMask) != data_mask))
	{
		for (U32 i = 0; i < LLVertexBuffer::TYPE_MAX; ++i)
		{
			U32 mask = 1 << i;
			if (mask & data_mask && !(mask & mTypeMask))
			{ //bit set in data_mask, but not set in mTypeMask
				LL_WARNS() << "Missing required component " << vb_type_name[i] << LL_ENDL;
			}
		}
		LL_ERRS() << "LLVertexBuffer::setupVertexBuffer missing required components for supplied data mask." << LL_ENDL;
	}

	if (LLGLSLShader::sNoFixedFunction)
	{
		// shader path: generic vertex attributes, one per TYPE_* slot
		if (data_mask & MAP_NORMAL)
		{
			S32 loc = TYPE_NORMAL;
			void* ptr = (void*)(base + mOffsets[TYPE_NORMAL]);
			glVertexAttribPointerARB(loc, 3, GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_NORMAL], ptr);
		}
		if (data_mask & MAP_TEXCOORD3)
		{
			S32 loc = TYPE_TEXCOORD3;
			void* ptr = (void*)(base + mOffsets[TYPE_TEXCOORD3]);
			glVertexAttribPointerARB(loc,2,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD3], ptr);
		}
		if (data_mask & MAP_TEXCOORD2)
		{
			S32 loc = TYPE_TEXCOORD2;
			void* ptr = (void*)(base + mOffsets[TYPE_TEXCOORD2]);
			glVertexAttribPointerARB(loc,2,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD2], ptr);
		}
		if (data_mask & MAP_TEXCOORD1)
		{
			S32 loc = TYPE_TEXCOORD1;
			void* ptr = (void*)(base + mOffsets[TYPE_TEXCOORD1]);
			glVertexAttribPointerARB(loc,2,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD1], ptr);
		}
		if (data_mask & MAP_TANGENT)
		{
			S32 loc = TYPE_TANGENT;
			void* ptr = (void*)(base + mOffsets[TYPE_TANGENT]);
			glVertexAttribPointerARB(loc, 4,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_TANGENT], ptr);
		}
		if (data_mask & MAP_TEXCOORD0)
		{
			S32 loc = TYPE_TEXCOORD0;
			void* ptr = (void*)(base + mOffsets[TYPE_TEXCOORD0]);
			glVertexAttribPointerARB(loc,2,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD0], ptr);
		}
		if (data_mask & MAP_COLOR)
		{
			S32 loc = TYPE_COLOR;
			//bind emissive instead of color pointer if emissive is present
			void* ptr = (data_mask & MAP_EMISSIVE) ? (void*)(base + mOffsets[TYPE_EMISSIVE]) : (void*)(base + mOffsets[TYPE_COLOR]);
			glVertexAttribPointerARB(loc, 4, GL_UNSIGNED_BYTE, GL_TRUE, LLVertexBuffer::sTypeSize[TYPE_COLOR], ptr);
		}
		if (data_mask & MAP_EMISSIVE)
		{
			S32 loc = TYPE_EMISSIVE;
			void* ptr = (void*)(base + mOffsets[TYPE_EMISSIVE]);
			glVertexAttribPointerARB(loc, 4, GL_UNSIGNED_BYTE, GL_TRUE, LLVertexBuffer::sTypeSize[TYPE_EMISSIVE], ptr);

			if (!(data_mask & MAP_COLOR))
			{ //map emissive to color channel when color is not also being bound to avoid unnecessary shader swaps
				loc = TYPE_COLOR;
				glVertexAttribPointerARB(loc, 4, GL_UNSIGNED_BYTE, GL_TRUE, LLVertexBuffer::sTypeSize[TYPE_EMISSIVE], ptr);
			}
		}
		if (data_mask & MAP_WEIGHT)
		{
			S32 loc = TYPE_WEIGHT;
			void* ptr = (void*)(base + mOffsets[TYPE_WEIGHT]);
			glVertexAttribPointerARB(loc, 1, GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_WEIGHT], ptr);
		}
		if (data_mask & MAP_WEIGHT4)
		{
			S32 loc = TYPE_WEIGHT4;
			void* ptr = (void*)(base+mOffsets[TYPE_WEIGHT4]);
			glVertexAttribPointerARB(loc, 4, GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_WEIGHT4], ptr);
		}
		if (data_mask & MAP_CLOTHWEIGHT)
		{
			S32 loc = TYPE_CLOTHWEIGHT;
			void* ptr = (void*)(base + mOffsets[TYPE_CLOTHWEIGHT]);
			glVertexAttribPointerARB(loc, 4, GL_FLOAT, GL_TRUE, LLVertexBuffer::sTypeSize[TYPE_CLOTHWEIGHT], ptr);
		}
		if (data_mask & MAP_TEXTURE_INDEX &&
			(gGLManager.mGLSLVersionMajor >= 2 || gGLManager.mGLSLVersionMinor >= 30)) //indexed texture rendering requires GLSL 1.30 or later
		{
			S32 loc = TYPE_TEXTURE_INDEX;
			// texture index lives in the W component of the position, 12 bytes in
			void *ptr = (void*) (base + mOffsets[TYPE_VERTEX] + 12);
			glVertexAttribIPointer(loc, 1, GL_UNSIGNED_INT, LLVertexBuffer::sTypeSize[TYPE_VERTEX], ptr);
		}
		if (data_mask & MAP_VERTEX)
		{
			S32 loc = TYPE_VERTEX;
			void* ptr = (void*)(base + mOffsets[TYPE_VERTEX]);
			glVertexAttribPointerARB(loc, 3,GL_FLOAT, GL_FALSE, LLVertexBuffer::sTypeSize[TYPE_VERTEX], ptr);
		}
	}
	else
	{
		// fixed-function path
		if (data_mask & MAP_NORMAL)
		{
			glNormalPointer(GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_NORMAL], (void*)(base + mOffsets[TYPE_NORMAL]));
		}
		if (data_mask & MAP_TEXCOORD3)
		{
			glClientActiveTextureARB(GL_TEXTURE3_ARB);
			glTexCoordPointer(2,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD3], (void*)(base + mOffsets[TYPE_TEXCOORD3]));
			glClientActiveTextureARB(GL_TEXTURE0_ARB);
		}
		if (data_mask & MAP_TEXCOORD2)
		{
			glClientActiveTextureARB(GL_TEXTURE2_ARB);
			glTexCoordPointer(2,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD2], (void*)(base + mOffsets[TYPE_TEXCOORD2]));
			glClientActiveTextureARB(GL_TEXTURE0_ARB);
		}
		if (data_mask & MAP_TEXCOORD1)
		{
			glClientActiveTextureARB(GL_TEXTURE1_ARB);
			glTexCoordPointer(2,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD1], (void*)(base + mOffsets[TYPE_TEXCOORD1]));
			glClientActiveTextureARB(GL_TEXTURE0_ARB);
		}
		if (data_mask & MAP_TANGENT)
		{
			glClientActiveTextureARB(GL_TEXTURE2_ARB);
			glTexCoordPointer(4,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_TANGENT], (void*)(base + mOffsets[TYPE_TANGENT]));
			glClientActiveTextureARB(GL_TEXTURE0_ARB);
		}
		if (data_mask & MAP_TEXCOORD0)
		{
			glTexCoordPointer(2,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_TEXCOORD0], (void*)(base + mOffsets[TYPE_TEXCOORD0]));
		}
		if (data_mask & MAP_COLOR)
		{
			glColorPointer(4, GL_UNSIGNED_BYTE, LLVertexBuffer::sTypeSize[TYPE_COLOR], (void*)(base + mOffsets[TYPE_COLOR]));
		}
		if (data_mask & MAP_VERTEX)
		{
			glVertexPointer(3,GL_FLOAT, LLVertexBuffer::sTypeSize[TYPE_VERTEX], (void*)(base + 0));
		}
	}

	llglassertok();
}
LLVertexBuffer::MappedRegion::MappedRegion(S32 type, S32 index, S32 count)
: mType(type), mIndex(index), mCount(count)
{