/**
 * @file llvertexbuffer.cpp
 * @brief LLVertexBuffer implementation
 *
 * $LicenseInfo:firstyear=2003&license=viewerlgpl$
 * Copyright (C) 2010, Linden Research, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License only.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
 * $/LicenseInfo$
 */
#include "linden_common.h"
#include "llfasttimer.h"
#include "llsys.h"
#include "llvertexbuffer.h"
// #include "llrender.h"
#include "llglheaders.h"
#include "llrender.h"
David Parks
committed
#include "llshadermgr.h"
#include "llglslshader.h"
//Next Highest Power Of Two
//helper function, returns first number > v that is a power of 2, or v if v is already a power of 2
U32 nhpo2(U32 v)
{
    // Bit-smearing round-up instead of the previous doubling loop: the loop
    // form (r *= 2) wraps to 0 for v > 0x80000000 and then spins forever.
    // NOTE: results are only meaningful for v <= 0x80000000 (as before).
    if (v <= 1)
    {
        return 1; // matches original behavior: nhpo2(0) == nhpo2(1) == 1
    }
    --v;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v + 1;
}
//which power of 2 is i?
//assumes i is a power of 2 > 0
U32 wpo2(U32 i)
{
    llassert(i > 0);
    llassert(nhpo2(i) == i);

    // Count how many halvings it takes to reduce i to 1.
    U32 exponent = 0;
    for (U32 rest = i >> 1; rest != 0; rest >>= 1)
    {
        ++exponent;
    }
    return exponent;
}
David Parks
committed
// Strict weak ordering for MappedRegion: sorts regions by ascending start
// offset. Presumably used with std::sort over pending mapped regions --
// confirm at the call site.
struct CompareMappedRegion
{
    bool operator()(const LLVertexBuffer::MappedRegion& lhs, const LLVertexBuffer::MappedRegion& rhs)
    {
        return lhs.mStart < rhs.mStart;
    }
};
#define ENABLE_GL_WORK_QUEUE 0
#if ENABLE_GL_WORK_QUEUE
#define THREAD_COUNT 1
//============================================================================

// High performance WorkQueue for usage in real-time rendering work
// Simple closable MPMC queue of std::function tasks guarded by one mutex.
class GLWorkQueue
{
public:
    using Work = std::function<void()>;

    GLWorkQueue();

    // Enqueue a work item and wake one waiting worker.
    void post(const Work& value);

    // Number of queued items (takes the lock).
    size_t size();

    // true when the queue is closed and fully drained.
    bool done();

    // Get the next element from the queue (blocks until an item is
    // available or the queue is closed).
    Work pop();

    // Pop and execute one work item.
    void runOne();

    bool runPending();

    // Worker loop: execute items until close() is observed.
    void runUntilClose();

    // Mark the queue closed and wake all waiters.
    void close();

    bool isClosed();

    void syncGL();

private:
    std::mutex mMutex;
    std::condition_variable mCondition;
    std::queue<Work> mQueue;
    bool mClosed = false;
};
David Parks
committed
David Parks
committed
// Default construction: members are initialized in-class; nothing else to do.
GLWorkQueue::GLWorkQueue()
{
}
David Parks
committed
// Intentionally a no-op: the GL fence-sync logic below is disabled.
// NOTE(review): re-enabling requires restoring an mSync member -- confirm.
void GLWorkQueue::syncGL()
{
    /*if (mSync)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        glWaitSync(mSync, 0, GL_TIMEOUT_IGNORED);
        mSync = 0;
    }*/
}
David Parks
committed
// Report how many work items are currently queued.
size_t GLWorkQueue::size()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    std::lock_guard<std::mutex> guard(mMutex);
    return mQueue.size();
}
David Parks
committed
// The queue is finished once it has been drained and closed.
// (size() is checked first so an open, non-empty queue short-circuits.)
bool GLWorkQueue::done()
{
    bool drained = (size() == 0);
    return drained && isClosed();
}
David Parks
committed
// Enqueue a work item and wake one waiting worker.
void GLWorkQueue::post(const GLWorkQueue::Work& value)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
    {
        std::lock_guard<std::mutex> lock(mMutex);
        // NOTE: the previous std::move(value) on a const reference was a
        // silent copy anyway; copy explicitly -- the caller keeps `value`.
        mQueue.push(value);
    }

    // Notify outside the lock so the woken worker doesn't immediately
    // block on mMutex.
    mCondition.notify_one();
}
David Parks
committed
// Get the next element from the queue.
// Blocks until an item is available or the queue is closed; if the queue
// closed while empty, returns a harmless no-op task so callers can invoke
// the result unconditionally.
GLWorkQueue::Work GLWorkQueue::pop()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;

    // Hold ONE lock across both the wait and the dequeue.  The previous
    // implementation released the mutex between waiting and popping, so
    // another consumer could steal the item in the gap -- the empty-lambda
    // fallback below papered over that race.
    std::unique_lock<std::mutex> lock(mMutex);

    // Wait for a new element to become available or for the queue to close
    mCondition.wait(lock, [this] { return !mQueue.empty() || mClosed; });

    if (!mQueue.empty())
    {
        Work ret = mQueue.front();
        mQueue.pop();
        return ret;
    }

    // Closed and empty: hand back a no-op.
    return []() {};
}
David Parks
committed
void GLWorkQueue::runOne()
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
Work w = pop();
w();
//mSync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
}
David Parks
committed
void GLWorkQueue::runUntilClose()
David Parks
committed
while (!isClosed())
{
runOne();
}
}
David Parks
committed
void GLWorkQueue::close()
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
{
std::lock_guard<std::mutex> lock(mMutex);
mClosed = true;
}
David Parks
committed
mCondition.notify_all();
}
David Parks
committed
bool GLWorkQueue::isClosed()
David Parks
committed
{
David Parks
committed
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
std::lock_guard<std::mutex> lock(mMutex);
return mClosed;
David Parks
committed
}
David Parks
committed
#include "llwindow.h"

// Background thread that makes a shared GL context current and drains a
// GLWorkQueue until it is closed.
class LLGLWorkerThread : public LLThread
{
public:
    LLGLWorkerThread(const std::string& name, GLWorkQueue* queue, LLWindow* window)
        : LLThread(name)
    {
        mWindow = window;
        // Create the shared context up front, before the worker starts running.
        mContext = mWindow->createSharedContext();
        mQueue = queue;
    }

    void run() override
    {
        mWindow->makeContextCurrent(mContext);
        gGL.init(false);
        mQueue->runUntilClose();
        gGL.shutdown();
        mWindow->destroySharedContext(mContext);
    }

    GLWorkQueue* mQueue;     // not owned
    LLWindow* mWindow;       // not owned
    void* mContext = nullptr;
};
David Parks
committed
static LLGLWorkerThread* sVBOThread[THREAD_COUNT];
static GLWorkQueue* sQueue = nullptr;
David Parks
committed
#endif
David Parks
committed
//============================================================================
David Parks
committed
// Pool of reusable VertexBuffer state
David Parks
committed
David Parks
committed
// batch calls to glGenBuffers
// Hands out GL buffer names from a per-thread pool, refilled in bulk so the
// glGenBuffers driver overhead is amortized over pool_size allocations.
static GLuint gen_buffer()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX;
    constexpr U32 pool_size = 4096;

    thread_local static GLuint sNamePool[pool_size];
    thread_local static U32 sIndex = 0;

    if (sIndex == 0)
    { // pool exhausted -- refill it
        LL_PROFILE_ZONE_NAMED_CATEGORY_VERTEX("gen buffer");
        sIndex = pool_size;
        if (!gGLManager.mIsAMD)
        {
            glGenBuffers(pool_size, sNamePool);
        }
        else
        { // work around for AMD driver bug
            for (U32 i = 0; i < pool_size; ++i)
            {
                glGenBuffers(1, sNamePool + i);
            }
        }
    }

    // Consume the next cached name from the top of the pool.
    return sNamePool[--sIndex];
}
#define ANALYZE_VBO_POOL 0
David Parks
committed
class LLVBOPool
{
public:
typedef std::chrono::steady_clock::time_point Time;
struct Entry
{
U8* mData;
GLuint mGLName;
Time mAge;
};
~LLVBOPool()
{
clear();
}
typedef std::unordered_map<U32, std::list<Entry>> Pool;
Pool mVBOPool;
Pool mIBOPool;
U32 mTouchCount = 0;
#if ANALYZE_VBO_POOL
U64 mDistributed = 0;
U64 mAllocated = 0;
U64 mReserved = 0;
U32 mMisses = 0;
U32 mHits = 0;
#endif
// increase the size to some common value (e.g. a power of two) to increase hit rate
void adjustSize(U32& size)
{
// size = nhpo2(size); // (193/303)/580 MB (distributed/allocated)/reserved in VBO Pool. Overhead: 66 percent. Hit rate: 77 percent
//(245/276)/385 MB (distributed/allocated)/reserved in VBO Pool. Overhead: 57 percent. Hit rate: 69 percent
//(187/209)/397 MB (distributed/allocated)/reserved in VBO Pool. Overhead: 112 percent. Hit rate: 76 percent
U32 block_size = llmax(nhpo2(size) / 8, (U32) 16);
size += block_size - (size % block_size);
}
David Parks
committed
void allocate(GLenum type, U32 size, GLuint& name, U8*& data)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX;
llassert(type == GL_ARRAY_BUFFER || type == GL_ELEMENT_ARRAY_BUFFER);
llassert(name == 0); // non zero name indicates a gl name that wasn't freed
llassert(data == nullptr); // non null data indicates a buffer that wasn't freed
llassert(size >= 2); // any buffer size smaller than a single index is nonsensical
#if ANALYZE_VBO_POOL
mDistributed += size;
adjustSize(size);
mAllocated += size;
#else
adjustSize(size);
#endif
David Parks
committed
auto& pool = type == GL_ELEMENT_ARRAY_BUFFER ? mIBOPool : mVBOPool;
David Parks
committed
if (iter == pool.end())
{ // cache miss, allocate a new buffer
LL_PROFILE_ZONE_NAMED_CATEGORY_VERTEX("vbo pool miss");
LL_PROFILE_GPU_ZONE("vbo alloc");
#if ANALYZE_VBO_POOL
mMisses++;
#endif
David Parks
committed
name = gen_buffer();
glBindBuffer(type, name);
glBufferData(type, size, nullptr, GL_DYNAMIC_DRAW);
if (type == GL_ELEMENT_ARRAY_BUFFER)
{
LLVertexBuffer::sGLRenderIndices = name;
}
else
{
LLVertexBuffer::sGLRenderBuffer = name;
}
data = (U8*)ll_aligned_malloc_16(size);
}
else
{
#if ANALYZE_VBO_POOL
mHits++;
llassert(mReserved >= size); // assert if accounting gets messed up
mReserved -= size;
#endif
David Parks
committed
std::list<Entry>& entries = iter->second;
Entry& entry = entries.back();
name = entry.mGLName;
data = entry.mData;
David Parks
committed
entries.pop_back();
if (entries.empty())
{
pool.erase(iter);
}
}
David Parks
committed
}
void free(GLenum type, U32 size, GLuint name, U8* data)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX;
llassert(type == GL_ARRAY_BUFFER || type == GL_ELEMENT_ARRAY_BUFFER);
llassert(size >= 2);
llassert(name != 0);
llassert(data != nullptr);
clean();
#if ANALYZE_VBO_POOL
llassert(mDistributed >= size);
mDistributed -= size;
adjustSize(size);
llassert(mAllocated >= size);
mAllocated -= size;
mReserved += size;
#else
adjustSize(size);
#endif
David Parks
committed
auto& pool = type == GL_ELEMENT_ARRAY_BUFFER ? mIBOPool : mVBOPool;
David Parks
committed
if (iter == pool.end())
{
std::list<Entry> newlist;
newlist.push_front({ data, name, std::chrono::steady_clock::now() });
pool[size] = newlist;
}
else
{
iter->second.push_front({ data, name, std::chrono::steady_clock::now() });
}
David Parks
committed
}
// clean periodically (clean gets called for every alloc/free)
David Parks
committed
void clean()
{
mTouchCount++;
if (mTouchCount < 1024) // clean every 1k touches
{
return;
}
mTouchCount = 0;
David Parks
committed
LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX;
std::unordered_map<U32, std::list<Entry>>* pools[] = { &mVBOPool, &mIBOPool };
using namespace std::chrono_literals;
Time cutoff = std::chrono::steady_clock::now() - 5s;
for (auto* pool : pools)
{
for (Pool::iterator iter = pool->begin(); iter != pool->end(); )
{
auto& entries = iter->second;
while (!entries.empty() && entries.back().mAge < cutoff)
{
LL_PROFILE_ZONE_NAMED_CATEGORY_VERTEX("vbo cache timeout");
auto& entry = entries.back();
ll_aligned_free_16(entry.mData);
glDeleteBuffers(1, &entry.mGLName);
#if ANALYZE_VBO_POOL
llassert(mReserved >= iter->first);
mReserved -= iter->first;
#endif
David Parks
committed
entries.pop_back();
David Parks
committed
}
if (entries.empty())
{
iter = pool->erase(iter);
}
else
{
++iter;
}
}
}
#if ANALYZE_VBO_POOL
LL_INFOS() << llformat("(%d/%d)/%d MB (distributed/allocated)/total in VBO Pool. Overhead: %d percent. Hit rate: %d percent",
mDistributed / 1000000,
mAllocated / 1000000,
(mAllocated + mReserved) / 1000000, // total bytes
((mAllocated+mReserved-mDistributed)*100)/llmax(mDistributed, (U64) 1), // overhead percent
(mHits*100)/llmax(mMisses+mHits, (U32)1)) // hit rate percent
<< LL_ENDL;
#endif
David Parks
committed
}
void clear()
{
for (auto& entries : mIBOPool)
{
for (auto& entry : entries.second)
{
ll_aligned_free_16(entry.mData);
glDeleteBuffers(1, &entry.mGLName);
}
}
for (auto& entries : mVBOPool)
{
for (auto& entry : entries.second)
{
ll_aligned_free_16(entry.mData);
glDeleteBuffers(1, &entry.mGLName);
}
}
#if ANALYZE_VBO_POOL
mReserved = 0;
#endif
David Parks
committed
mIBOPool.clear();
mVBOPool.clear();
}
};
static LLVBOPool* sVBOPool = nullptr;
//============================================================================
//
David Parks
committed
//static
U32 LLVertexBuffer::sGLRenderBuffer = 0;
U32 LLVertexBuffer::sGLRenderIndices = 0;
U32 LLVertexBuffer::sLastMask = 0;
U32 LLVertexBuffer::sVertexCount = 0;
David Parks
committed
//NOTE: each component must be AT LEAST 4 bytes in size to avoid a performance penalty on AMD hardware
const U32 LLVertexBuffer::sTypeSize[LLVertexBuffer::TYPE_MAX] =
{
    sizeof(LLVector4), // TYPE_VERTEX,
    sizeof(LLVector4), // TYPE_NORMAL,
    sizeof(LLVector2), // TYPE_TEXCOORD0,
    sizeof(LLVector2), // TYPE_TEXCOORD1,
    sizeof(LLVector2), // TYPE_TEXCOORD2,
    sizeof(LLVector2), // TYPE_TEXCOORD3,
    sizeof(LLColor4U), // TYPE_COLOR,
    sizeof(LLColor4U), // TYPE_EMISSIVE, only alpha is used currently
    sizeof(LLVector4), // TYPE_TANGENT,
    sizeof(F32),       // TYPE_WEIGHT,
    sizeof(LLVector4), // TYPE_WEIGHT4,
    sizeof(LLVector4), // TYPE_CLOTHWEIGHT,
    sizeof(LLVector4), // TYPE_TEXTURE_INDEX (actually exists as position.w), no extra data, but stride is 16 bytes
};

// Debug names for the vertex attribute types, indexed to match the enum
// above (plus TYPE_MAX and TYPE_INDEX sentinels).
static const std::string vb_type_name[] =
{
    "TYPE_VERTEX",
    "TYPE_NORMAL",
    "TYPE_TEXCOORD0",
    "TYPE_TEXCOORD1",
    "TYPE_TEXCOORD2",
    "TYPE_TEXCOORD3",
    "TYPE_COLOR",
    "TYPE_EMISSIVE",
    "TYPE_TANGENT",
    "TYPE_WEIGHT",
    "TYPE_WEIGHT4",
    "TYPE_CLOTHWEIGHT",
    "TYPE_TEXTURE_INDEX",
    "TYPE_MAX",
    "TYPE_INDEX",
};
// Map LLRender draw-mode enum values to their GL primitive constants.
const U32 LLVertexBuffer::sGLMode[LLRender::NUM_MODES] =
{
    GL_TRIANGLES,
    GL_TRIANGLE_STRIP,
    GL_TRIANGLE_FAN,
    GL_POINTS,
    GL_LINES,
    GL_LINE_STRIP,
    GL_QUADS,
    GL_LINE_LOOP,
};
//static
// Enable/disable vertex attribute arrays so exactly the attributes in
// data_mask are active, diffing against the previously applied mask to
// avoid redundant GL calls.
void LLVertexBuffer::setupClientArrays(U32 data_mask)
{
    if (sLastMask != data_mask)
    {
        for (U32 i = 0; i < TYPE_MAX; ++i)
        {
            S32 loc = i; // attribute location == type index

            U32 mask = 1 << i;
            if (sLastMask & mask)
            { //was enabled
                if (!(data_mask & mask))
                { //needs to be disabled
                    glDisableVertexAttribArray(loc);
                }
            }
            else
            { //was disabled
                if (data_mask & mask)
                { //needs to be enabled
                    glEnableVertexAttribArray(loc);
                }
            }
        }
    }

    sLastMask = data_mask;
}
David Parks
committed
// Immediate-mode style helper: submit `pos` as raw vertices via gGL.
void LLVertexBuffer::drawArrays(U32 mode, const std::vector<LLVector3>& pos)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX;

    gGL.begin(mode);
    for (auto& v : pos)
    {
        gGL.vertex3fv(v.mV);
    }
    gGL.end();
    gGL.flush();
}
David Parks
committed
//static
// Immediate-mode style indexed draw: submit positions (and optional
// texcoords) referenced by a U16 index list via gGL.
void LLVertexBuffer::drawElements(U32 mode, const LLVector4a* pos, const LLVector2* tc, U32 num_indices, const U16* indicesp)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX;
    llassert(LLGLSLShader::sCurBoundShaderPtr != NULL);

    gGL.syncMatrices();

    U32 mask = LLVertexBuffer::MAP_VERTEX;
    if (tc)
    {
        mask = mask | LLVertexBuffer::MAP_TEXCOORD0;
    }

    // Make sure no VBO is bound while issuing immediate-mode style verts.
    unbind();

    gGL.begin(mode);

    if (tc != nullptr)
    {
        // U32 loop index -- the original `int` compared signed vs unsigned.
        for (U32 i = 0; i < num_indices; ++i)
        {
            U16 idx = indicesp[i];
            gGL.texCoord2fv(tc[idx].mV);
            gGL.vertex3fv(pos[idx].getF32ptr());
        }
    }
    else
    {
        for (U32 i = 0; i < num_indices; ++i)
        {
            U16 idx = indicesp[i];
            gGL.vertex3fv(pos[idx].getF32ptr());
        }
    }

    gGL.end();
    gGL.flush();
}
// Debug-only sanity check of a draw call's ranges: vertex span, index span,
// every referenced index, and (for indexed-texture shaders) the texture
// index stored in position.w.  Returns true when valid or gDebugGL is off;
// hard-errors (LL_ERRS) on violations.
bool LLVertexBuffer::validateRange(U32 start, U32 end, U32 count, U32 indices_offset) const
{
    if (!gDebugGL)
    {
        return true;
    }

    llassert(start < (U32)mNumVerts);
    llassert(end < (U32)mNumVerts);

    if (start >= (U32) mNumVerts ||
        end >= (U32) mNumVerts)
    {
        LL_ERRS() << "Bad vertex buffer draw range: [" << start << ", " << end << "] vs " << mNumVerts << LL_ENDL;
    }

    llassert(mNumIndices >= 0);

    if (indices_offset >= (U32) mNumIndices ||
        indices_offset + count > (U32) mNumIndices)
    {
        LL_ERRS() << "Bad index buffer draw range: [" << indices_offset << ", " << indices_offset+count << "]" << LL_ENDL;
    }

    // Verify every index referenced by this draw lies within [start, end].
    U16* idx = (U16*) mMappedIndexData+indices_offset;
    for (U32 i = 0; i < count; ++i)
    {
        llassert(idx[i] >= start);
        llassert(idx[i] <= end);

        if (idx[i] < start || idx[i] > end)
        {
            LL_ERRS() << "Index out of range: " << idx[i] << " not in [" << start << ", " << end << "]" << LL_ENDL;
        }
    }

    LLGLSLShader* shader = LLGLSLShader::sCurBoundShaderPtr;

    if (shader && shader->mFeatures.mIndexedTextureChannels > 1)
    {
        // Texture index rides in position.w -- confirm it selects a valid
        // texture channel for the bound shader.
        LLVector4a* v = (LLVector4a*) mMappedData;

        for (U32 i = start; i < end; i++)
        {
            U32 idx = (U32) (v[i][3]+0.25f);
            if (idx >= shader->mFeatures.mIndexedTextureChannels)
            {
                LL_ERRS() << "Bad texture index found in vertex data stream." << LL_ENDL;
            }
        }
    }

    return true;
}
#ifdef LL_PROFILER_ENABLE_RENDER_DOC
// Attach a human-readable label to this buffer's GL name for RenderDoc.
void LLVertexBuffer::setLabel(const char* label) {
    LL_LABEL_OBJECT_GL(GL_BUFFER, mGLBuffer, strlen(label), label);
}
#endif
// Issue an indexed draw over [start, end] using `count` indices beginning
// at indices_offset.  Asserts that this buffer is the currently bound one.
void LLVertexBuffer::drawRange(U32 mode, U32 start, U32 end, U32 count, U32 indices_offset) const
{
    llassert(validateRange(start, end, count, indices_offset));
    llassert(mGLBuffer == sGLRenderBuffer);
    llassert(mGLIndices == sGLRenderIndices);

    glDrawRangeElements(sGLMode[mode], start, end, count, GL_UNSIGNED_SHORT,
        (GLvoid*) (indices_offset * sizeof(U16)));
}
// Convenience wrapper: draw over the buffer's full vertex span.
void LLVertexBuffer::draw(U32 mode, U32 count, U32 indices_offset) const
{
    drawRange(mode, 0, mNumVerts-1, count, indices_offset);
}
// Non-indexed draw of `count` vertices starting at `first`.  Asserts this
// buffer is the currently bound one.
void LLVertexBuffer::drawArrays(U32 mode, U32 first, U32 count) const
{
    llassert(first + count <= mNumVerts);
    llassert(mGLBuffer == sGLRenderBuffer);
    llassert(mGLIndices == sGLRenderIndices);

    gGL.syncMatrices();
    glDrawArrays(sGLMode[mode], first, count);
}
David Parks
committed
//static
// One-time setup: create the shared VBO pool (and, when enabled, the GL
// worker threads).  `window` is only used by the worker-thread path.
void LLVertexBuffer::initClass(LLWindow* window)
{
    llassert(sVBOPool == nullptr);
    sVBOPool = new LLVBOPool();

#if ENABLE_GL_WORK_QUEUE
    sQueue = new GLWorkQueue();

    for (int i = 0; i < THREAD_COUNT; ++i)
    {
        sVBOThread[i] = new LLGLWorkerThread("VBO Worker", sQueue, window);
        sVBOThread[i]->start();
    }
#endif
}
//static
// Unbind both buffer targets and reset the cached binding state to match.
void LLVertexBuffer::unbind()
{
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    sGLRenderIndices = 0;
    sGLRenderBuffer = 0;
}
//static
// Teardown: destroy the VBO pool (releasing all cached GL buffers) and,
// when enabled, shut down the GL worker threads.
void LLVertexBuffer::cleanupClass()
{
    delete sVBOPool;
    sVBOPool = nullptr;

#if ENABLE_GL_WORK_QUEUE
    // Close the queue first so workers fall out of runUntilClose().
    sQueue->close();

    for (int i = 0; i < THREAD_COUNT; ++i)
    {
        sVBOThread[i]->shutdown();
        delete sVBOThread[i];
        sVBOThread[i] = nullptr;
    }

    delete sQueue;
    sQueue = nullptr;
#endif
}
//----------------------------------------------------------------------------

// Construct an empty buffer for the given attribute type mask.  Offsets are
// filled in later (see calcOffsets); start them all at zero.
LLVertexBuffer::LLVertexBuffer(U32 typemask)
: LLRefCount(),
  mTypeMask(typemask)
{
    //zero out offsets
    for (U32 i = 0; i < TYPE_MAX; i++)
    {
        mOffsets[i] = 0;
    }
}
//static
// Fill `offsets` with the byte offset of each enabled attribute's data block
// (attributes are laid out contiguously, each spanning num_vertices
// elements) and return the total size in bytes.
U32 LLVertexBuffer::calcOffsets(const U32& typemask, U32* offsets, U32 num_vertices)
{
    U32 offset = 0;
    for (U32 i=0; i<TYPE_TEXTURE_INDEX; i++)
    {
        U32 mask = 1<<i;
        if (typemask & mask)
        {
            if (offsets && LLVertexBuffer::sTypeSize[i])
            {
                // Record where this attribute begins, then advance past
                // num_vertices worth of its data.
                offsets[i] = offset;
                offset += LLVertexBuffer::sTypeSize[i]*num_vertices;
            }
        }
    }

    // Texture index aliases position.w -- 12 bytes into each vertex record.
    offsets[TYPE_TEXTURE_INDEX] = offsets[TYPE_VERTEX] + 12;

    return offset;
}
//static
// Sum the per-vertex sizes of every attribute enabled in typemask.
// (The corrupted original returned from inside the loop after the first
// enabled attribute; the return belongs after the loop completes.)
U32 LLVertexBuffer::calcVertexSize(const U32& typemask)
{
    U32 size = 0;
    for (U32 i = 0; i < TYPE_TEXTURE_INDEX; i++)
    {
        U32 mask = 1<<i;
        if (typemask & mask)
        {
            size += LLVertexBuffer::sTypeSize[i];
        }
    }
    return size;
}
// protected, use unref()
//virtual
// Release GL resources; hard-error if the mapped pointers survived, since
// that indicates a leaked allocation.
LLVertexBuffer::~LLVertexBuffer()
{
    destroyGLBuffer();
    destroyGLIndices();

    if (mMappedData)
    {
        LL_ERRS() << "Failed to clear vertex buffer's vertices" << LL_ENDL;
    }
    if (mMappedIndexData)
    {
        LL_ERRS() << "Failed to clear vertex buffer's indices" << LL_ENDL;
    }
}
//----------------------------------------------------------------------------

// Acquire a GL vertex buffer name plus client shadow memory of `size` bytes
// from the shared pool.  Asserts this object holds no allocation yet.
void LLVertexBuffer::genBuffer(U32 size)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX;
    llassert(sVBOPool);

    if (sVBOPool)
    {
        llassert(mSize == 0);
        llassert(mGLBuffer == 0);
        llassert(mMappedData == nullptr);

        mSize = size;
        sVBOPool->allocate(GL_ARRAY_BUFFER, mSize, mGLBuffer, mMappedData);
    }
}
// Acquire a GL index buffer name plus client shadow memory of `size` bytes
// from the shared pool.  Asserts this object holds no index allocation yet.
void LLVertexBuffer::genIndices(U32 size)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX;
    llassert(sVBOPool);

    if (sVBOPool)
    {
        llassert(mIndicesSize == 0);
        llassert(mGLIndices == 0);
        llassert(mMappedIndexData == nullptr);

        mIndicesSize = size;
        sVBOPool->allocate(GL_ELEMENT_ARRAY_BUFFER, mIndicesSize, mGLIndices, mMappedIndexData);
    }
}
// (Re)create the vertex buffer at the requested size.  A size of 0 is a
// successful no-op.  Returns false when the allocation failed (no mapped
// client memory came back).
bool LLVertexBuffer::createGLBuffer(U32 size)
{
    if (mGLBuffer || mMappedData)
    {
        destroyGLBuffer();
    }

    if (size == 0)
    {
        return true;
    }

    bool success = true;

    genBuffer(size);

    if (!mMappedData)
    {
        success = false;
    }
    return success;
}
// (Re)create the index buffer at the requested size.  A size of 0 is a
// successful no-op.  Returns false when the allocation failed.
bool LLVertexBuffer::createGLIndices(U32 size)
{
    if (mGLIndices)
    {
        destroyGLIndices();
    }

    if (size == 0)
    {
        return true;
    }

    bool success = true;

    genIndices(size);

    if (!mMappedIndexData)
    {
        success = false;
    }
    return success;
}
void LLVertexBuffer::destroyGLBuffer()
{
AndreyL ProductEngine
committed
if (mGLBuffer || mMappedData)
LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX;
//llassert(sVBOPool);
if (sVBOPool)
{
sVBOPool->free(GL_ARRAY_BUFFER, mSize, mGLBuffer, mMappedData);
}
mSize = 0;
mGLBuffer = 0;
mMappedData = nullptr;
}
}
void LLVertexBuffer::destroyGLIndices()
{
AndreyL ProductEngine
committed
if (mGLIndices || mMappedIndexData)
LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX;
//llassert(sVBOPool);
if (sVBOPool)
{
sVBOPool->free(GL_ELEMENT_ARRAY_BUFFER, mIndicesSize, mGLIndices, mMappedIndexData);
}
mIndicesSize = 0;
mGLIndices = 0;
mMappedIndexData = nullptr;
}
bool LLVertexBuffer::updateNumVerts(U32 nverts)
{
LL_WARNS() << "Vertex buffer overflow!" << LL_ENDL;