diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp
index 71bdca60e55cfaa0446018e7d6677c8a6046cc91..7b5907a6687b9f2e294641c9d2495f95e3cfbf92 100644
--- a/indra/llrender/llvertexbuffer.cpp
+++ b/indra/llrender/llvertexbuffer.cpp
@@ -368,7 +368,9 @@ LLVertexBuffer::LLVertexBuffer(U32 typemask, S32 usage) :
 	mGLBuffer(0),
 	mGLIndices(0), 
 	mMappedData(NULL),
-	mMappedIndexData(NULL), mLocked(FALSE),
+	mMappedIndexData(NULL), 
+	mVertexLocked(FALSE),
+	mIndexLocked(FALSE),
 	mFinal(FALSE),
 	mFilthy(FALSE),
 	mEmpty(TRUE),
@@ -865,24 +867,24 @@ void LLVertexBuffer::allocateClientIndexBuffer()
 }
 
 // Map for data access
-U8* LLVertexBuffer::mapBuffer(S32 access)
+U8* LLVertexBuffer::mapVertexBuffer(S32 type, S32 access)
 {
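+	// Maps only the vertex data; index data is now mapped separately via
+	// mapIndexBuffer() so the two halves can be locked independently.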
 	LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
 	if (mFinal)
 	{
-		llerrs << "LLVertexBuffer::mapBuffer() called on a finalized buffer." << llendl;
+		llerrs << "LLVertexBuffer::mapVeretxBuffer() called on a finalized buffer." << llendl;
 	}
 	if (!useVBOs() && !mMappedData && !mMappedIndexData)
 	{
-		llerrs << "LLVertexBuffer::mapBuffer() called on unallocated buffer." << llendl;
+		llerrs << "LLVertexBuffer::mapVertexBuffer() called on unallocated buffer." << llendl;
 	}
 		
-	if (!mLocked && useVBOs())
+	if (!mVertexLocked && useVBOs())
 	{
 		{
 			LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_VERTICES);
-			setBuffer(0);
-			mLocked = TRUE;
+			setBuffer(0, type);
+			mVertexLocked = TRUE;
 			stop_glerror();	
 
 			if(sDisableVBOMapping)
@@ -895,20 +897,7 @@ U8* LLVertexBuffer::mapBuffer(S32 access)
 			}
 			stop_glerror();
 		}
-		{
-			LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_INDICES);
-
-			if(sDisableVBOMapping)
-			{
-				allocateClientIndexBuffer() ;
-			}
-			else
-			{
-				mMappedIndexData = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
-			}
-			stop_glerror();
-		}
-
+
 		if (!mMappedData)
 		{
 			log_glerror();
@@ -944,6 +933,43 @@ U8* LLVertexBuffer::mapBuffer(S32 access)
 				llerrs << "memory allocation for vertex data failed." << llendl ;
 			}
 		}
+		sMappedCount++;
+	}
+	
+	return mMappedData;
+}
+
+U8* LLVertexBuffer::mapIndexBuffer(S32 access)
+{
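+	// Index-buffer counterpart of mapVertexBuffer(): binds the IBO via
+	// setBuffer(0, TYPE_INDEX), then maps it (or uses the client-side
+	// copy when VBO mapping is disabled).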
+	LLMemType mt2(LLMemType::MTYPE_VERTEX_MAP_BUFFER);
+	if (mFinal)
+	{
+		llerrs << "LLVertexBuffer::mapIndexBuffer() called on a finalized buffer." << llendl;
+	}
+	if (!useVBOs() && !mMappedData && !mMappedIndexData)
+	{
+		llerrs << "LLVertexBuffer::mapIndexBuffer() called on unallocated buffer." << llendl;
+	}
+
+	if (!mIndexLocked && useVBOs())
+	{
+		{
+			LLMemType mt_v(LLMemType::MTYPE_VERTEX_MAP_BUFFER_INDICES);
+
+			setBuffer(0, TYPE_INDEX);
+			mIndexLocked = TRUE;
+			stop_glerror();	
+
+			if(sDisableVBOMapping)
+			{
+				allocateClientIndexBuffer() ;
+			}
+			else
+			{
+				mMappedIndexData = (U8*) glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
+			}
+			stop_glerror();
+		}
 
 		if (!mMappedIndexData)
 		{
@@ -968,70 +994,80 @@ U8* LLVertexBuffer::mapBuffer(S32 access)
 
 		sMappedCount++;
 	}
-	
-	return mMappedData;
+
+	return mMappedIndexData ;
 }
 
-void LLVertexBuffer::unmapBuffer()
+void LLVertexBuffer::unmapBuffer(S32 type)
 {
 	LLMemType mt2(LLMemType::MTYPE_VERTEX_UNMAP_BUFFER);
-	if (mMappedData || mMappedIndexData)
+	if (!useVBOs())
+	{
+		return ; //nothing to unmap
+	}
+
+	bool updated_all = false ;
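+	// type < 0 means "unmap everything"; otherwise only the half that
+	// matches type (vertex data, or TYPE_INDEX) is flushed and unlocked.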
+	if (mMappedData && mVertexLocked && type != TYPE_INDEX)
 	{
-		if (useVBOs() && mLocked)
+		updated_all = (mIndexLocked && type < 0) ; // both vertex and index buffers are done updating
+
+		if(sDisableVBOMapping)
 		{
-			if(sDisableVBOMapping)
-			{
-				if(mMappedData)
-				{
-					stop_glerror();
-					glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData);
-					stop_glerror();
-				}
-				if(mMappedIndexData)
-				{
-					stop_glerror();
-					glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData);
-					stop_glerror();
-				}
-			}
-			else
-			{
-				stop_glerror();
-				glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
-				stop_glerror();
-				glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
-				stop_glerror();
-			}
+			stop_glerror();
+			glBufferSubDataARB(GL_ARRAY_BUFFER_ARB, 0, getSize(), mMappedData);
+			stop_glerror();
+		}
+		else
+		{
+			stop_glerror();
+			glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
+			stop_glerror();
 
-			/*if (!sMapped)
-			{
-				llerrs << "Redundantly unmapped VBO!" << llendl;
-			}
-			sMapped = FALSE;*/
-			sMappedCount--;
+			mMappedData = NULL;
+		}
 
-			if (mUsage == GL_STATIC_DRAW_ARB)
-			{ //static draw buffers can only be mapped a single time
-				//throw out client data (we won't be using it again)
-				mEmpty = TRUE;
-				mFinal = TRUE;
+		mVertexLocked = FALSE ;
+		sMappedCount--;
+	}
 
-				if(sDisableVBOMapping)
-				{
-					freeClientBuffer() ;
-				}
-			}
-			else
-			{
-				mEmpty = FALSE;
-			}
+	if(mMappedIndexData && mIndexLocked && (type < 0 || type == TYPE_INDEX))
+	{
+		if(sDisableVBOMapping)
+		{
+			stop_glerror();
+			glBufferSubDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0, getIndicesSize(), mMappedIndexData);
+			stop_glerror();
+		}
+		else
+		{
+			stop_glerror();
+			glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
+			stop_glerror();
 
-			if(!sDisableVBOMapping)
+			mMappedIndexData = NULL ;
+		}
+
+		mIndexLocked = FALSE ;
+		sMappedCount--;
+	}
+
+	if(updated_all)
+	{
+		if(mUsage == GL_STATIC_DRAW_ARB)
+		{
+			//static draw buffers can only be mapped a single time
+			//throw out client data (we won't be using it again)
+			mEmpty = TRUE;
+			mFinal = TRUE;
+
+			if(sDisableVBOMapping)
 			{
-				mMappedIndexData = NULL;
-				mMappedData = NULL;
+				freeClientBuffer() ;
 			}
-			mLocked = FALSE;
+		}
+		else
+		{
+			mEmpty = FALSE;
 		}
 	}
 }
@@ -1045,15 +1081,16 @@ template <class T,S32 type> struct VertexBufferStrider
 					strider_t& strider, 
 					S32 index)
 	{
-		if (vbo.mapBuffer() == NULL)
-		{
-			llwarns << "mapBuffer failed!" << llendl;
-			return FALSE;
-		}
-
 		if (type == LLVertexBuffer::TYPE_INDEX)
 		{
 			S32 stride = sizeof(T);
+
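+			// map lazily: an index strider only needs the index buffer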
+			if (vbo.mapIndexBuffer() == NULL)
+			{
+				llwarns << "mapIndexBuffer failed!" << llendl;
+				return FALSE;
+			}
+
 			strider = (T*)(vbo.getMappedIndices() + index*stride);
 			strider.setStride(0);
 			return TRUE;
@@ -1061,6 +1098,13 @@ template <class T,S32 type> struct VertexBufferStrider
 		else if (vbo.hasDataType(type))
 		{
 			S32 stride = vbo.getStride();
+
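+			// map lazily: only the vertex data for this strider's type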
+			if (vbo.mapVertexBuffer(type) == NULL)
+			{
+				llwarns << "mapVertexBuffer failed!" << llendl;
+				return FALSE;
+			}
+
 			strider = (T*)(vbo.getMappedData() + vbo.getOffset(type) + index*stride);
 			strider.setStride(stride);
 			return TRUE;
@@ -1141,7 +1185,7 @@ void LLVertexBuffer::setStride(S32 type, S32 new_stride)
 //----------------------------------------------------------------------------
 
 // Set for rendering
-void LLVertexBuffer::setBuffer(U32 data_mask)
+void LLVertexBuffer::setBuffer(U32 data_mask, S32 type)
 {
 	LLMemType mt2(LLMemType::MTYPE_VERTEX_SET_BUFFER);
 	//set up pointers if the data mask is different ...
@@ -1282,7 +1326,7 @@ void LLVertexBuffer::setBuffer(U32 data_mask)
 		{
 			ll_fail("LLVertexBuffer::mapBuffer failed");
 		}
-		unmapBuffer();
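+		// forward the caller's type so only that mapping gets flushed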
+		unmapBuffer(type);
 	}
 	else
 	{		
diff --git a/indra/llrender/llvertexbuffer.h b/indra/llrender/llvertexbuffer.h
index 09a16d5b9dc67da2a989f7575856b126a032f8d2..c51ce7ac4e1119bd8b4d14c583f94ea2c032a37c 100644
--- a/indra/llrender/llvertexbuffer.h
+++ b/indra/llrender/llvertexbuffer.h
@@ -139,23 +139,24 @@ class LLVertexBuffer : public LLRefCount
 	void	updateNumVerts(S32 nverts);
 	void	updateNumIndices(S32 nindices); 
 	virtual BOOL	useVBOs() const;
-	void	unmapBuffer();
-		
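+	// pass type = -1 to unmap both the vertex and index mappings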
+	void	unmapBuffer(S32 type);
+	void freeClientBuffer() ;
+	void allocateClientVertexBuffer() ;
+	void allocateClientIndexBuffer() ;
+
 public:
 	LLVertexBuffer(U32 typemask, S32 usage);
 	
 	// map for data access
-	U8*		mapBuffer(S32 access = -1);
+	U8*		mapVertexBuffer(S32 type = -1, S32 access = -1);
+	U8*		mapIndexBuffer(S32 access = -1);
+
 	// set for rendering
-	virtual void	setBuffer(U32 data_mask); 	// calls  setupVertexBuffer() if data_mask is not 0
+	virtual void	setBuffer(U32 data_mask, S32 type = -1); 	// calls  setupVertexBuffer() if data_mask is not 0
 	// allocate buffer
 	void	allocateBuffer(S32 nverts, S32 nindices, bool create);
 	virtual void resizeBuffer(S32 newnverts, S32 newnindices);
-		
-	void freeClientBuffer() ;
-	void allocateClientVertexBuffer() ;
-	void allocateClientIndexBuffer() ;
-	
+
 	// Only call each getVertexPointer, etc, once before calling unmapBuffer()
 	// call unmapBuffer() after calls to getXXXStrider() before any calls to setBuffer()
 	// example:
@@ -174,7 +175,7 @@ class LLVertexBuffer : public LLRefCount
 	bool getClothWeightStrider(LLStrider<LLVector4>& strider, S32 index=0);
 	
 	BOOL isEmpty() const					{ return mEmpty; }
-	BOOL isLocked() const					{ return mLocked; }
+	BOOL isLocked() const					{ return mVertexLocked || mIndexLocked; }
 	S32 getNumVerts() const					{ return mNumVerts; }
 	S32 getNumIndices() const				{ return mNumIndices; }
 	S32 getRequestedVerts() const			{ return mRequestedNumVerts; }
@@ -213,14 +214,15 @@ class LLVertexBuffer : public LLRefCount
 	U32		mGLIndices;		// GL IBO handle
 	U8*		mMappedData;	// pointer to currently mapped data (NULL if unmapped)
 	U8*		mMappedIndexData;	// pointer to currently mapped indices (NULL if unmapped)
-	BOOL	mLocked;			// if TRUE, buffer is being or has been written to in client memory
+	BOOL	mVertexLocked;			// if TRUE, vertex buffer is being or has been written to in client memory
+	BOOL	mIndexLocked;			// if TRUE, index buffer is being or has been written to in client memory
 	BOOL	mFinal;			// if TRUE, buffer can not be mapped again
 	BOOL	mFilthy;		// if TRUE, entire buffer must be copied (used to prevent redundant dirty flags)
-	BOOL	mEmpty;			// if TRUE, client buffer is empty (or NULL). Old values have been discarded.
-	S32		mOffsets[TYPE_MAX];
+	BOOL	mEmpty;			// if TRUE, client buffer is empty (or NULL). Old values have been discarded.	
 	BOOL	mResized;		// if TRUE, client buffer has been resized and GL buffer has not
 	BOOL	mDynamicSize;	// if TRUE, buffer has been resized at least once (and should be padded)
-	
+	S32		mOffsets[TYPE_MAX];
+
 	class DirtyRegion
 	{
 	public: