diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp
index 9efdcd4e8c1ec7ac31ba13de30c0685ee58457d6..04d3e9fc08da05a209f628835302a9b8920dee6c 100644
--- a/indra/llmath/llvolume.cpp
+++ b/indra/llmath/llvolume.cpp
@@ -4955,7 +4955,7 @@ bool LLVolumeFace::VertexMapData::ComparePosition::operator()(const LLVector3& a
 
 void LLVolumeFace::remap()
 {
-    // generate a remap buffer
+    // Generate a remap buffer
     std::vector<unsigned int> remap(mNumIndices);
     S32 remap_vertices_count = LLMeshOptimizer::generateRemapMultiU16(&remap[0],
         mIndices,
@@ -4966,26 +4966,29 @@ void LLVolumeFace::remap()
         mNumVertices);
 
     // Allocate new buffers
-    U16* remap_indices = (U16*)ll_aligned_malloc_16(mNumIndices * sizeof(U16));
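+    // Round the index buffer allocation up to a 16-byte multiple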
+    S32 size = ((mNumIndices * sizeof(U16)) + 0xF) & ~0xF;
+    U16* remap_indices = (U16*)ll_aligned_malloc_16(size);
 
     S32 tc_bytes_size = ((remap_vertices_count * sizeof(LLVector2)) + 0xF) & ~0xF;
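+    // Positions, normals and texture coordinates are packed into one 64-byte-aligned allocation:
+    // normals follow the positions, texture coordinates follow the normals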
     LLVector4a* remap_positions = (LLVector4a*)ll_aligned_malloc<64>(sizeof(LLVector4a) * 2 * remap_vertices_count + tc_bytes_size);
     LLVector4a* remap_normals = remap_positions + remap_vertices_count;
     LLVector2* remap_tex_coords = (LLVector2*)(remap_normals + remap_vertices_count);
 
-    // fill the buffers
+    // Fill the buffers
     LLMeshOptimizer::remapIndexBufferU16(remap_indices, mIndices, mNumIndices, &remap[0]);
     LLMeshOptimizer::remapPositionsBuffer(remap_positions, mPositions, mNumVertices, &remap[0]);
     LLMeshOptimizer::remapNormalsBuffer(remap_normals, mNormals, mNumVertices, &remap[0]);
     LLMeshOptimizer::remapUVBuffer(remap_tex_coords, mTexCoords, mNumVertices, &remap[0]);
 
-    // free unused buffers
+    // Free unused buffers
     ll_aligned_free_16(mIndices);
     ll_aligned_free<64>(mPositions);
-    ll_aligned_free_16(mTangents);
 
+    // Tangents are now invalid
+    ll_aligned_free_16(mTangents);
     mTangents = NULL;
 
+    // Assign new values
     mIndices = remap_indices;
     mPositions = remap_positions;
     mNormals = remap_normals;
diff --git a/indra/llmeshoptimizer/llmeshoptimizer.cpp b/indra/llmeshoptimizer/llmeshoptimizer.cpp
index cb9716a9070fa000e97fa672ce57ba413dd0c990..c178348968c24599183bec3b560686f32afd1534 100644
--- a/indra/llmeshoptimizer/llmeshoptimizer.cpp
+++ b/indra/llmeshoptimizer/llmeshoptimizer.cpp
@@ -184,12 +184,6 @@ size_t LLMeshOptimizer::generateRemapMultiU16(
     const LLVector2 * text_coords,
     U64 vertex_count)
 {
-    meshopt_Stream streams[] = {
-       {(const float*)vertex_positions, sizeof(F32) * 3, sizeof(F32) * 4},
-       {(const float*)normals, sizeof(F32) * 3, sizeof(F32) * 4},
-       {(const float*)text_coords, sizeof(F32) * 2, sizeof(F32) * 2},
-    };
-
     S32 out_of_range_count = 0;
     U32* indices_u32 = NULL;
     if (indices)
@@ -199,7 +193,7 @@ size_t LLMeshOptimizer::generateRemapMultiU16(
         {
             if (indices[i] < vertex_count)
             {
-                indices_u32[i] = indices[i];
+                indices_u32[i] = (U32)indices[i];
             }
             else
             {
@@ -211,14 +205,10 @@ size_t LLMeshOptimizer::generateRemapMultiU16(
 
     if (out_of_range_count)
     {
-        LL_WARNS() << out_of_range_count << " indexes are out of range." << LL_ENDL;
+        LL_WARNS() << out_of_range_count << " indices are out of range." << LL_ENDL;
     }
 
-    // Remap can function without indices,
-    // but providing indices helps with removing unused vertices
-    U64 indeces_cmp = indices_u32 ? index_count : vertex_count;
-
-    size_t unique =  meshopt_generateVertexRemapMulti(&remap[0], indices_u32, indeces_cmp, vertex_count, streams, sizeof(streams) / sizeof(streams[0]));
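+    // Delegate the actual remap generation to the U32 variant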
+    size_t unique = generateRemapMultiU32(remap, indices_u32, index_count, vertex_positions, normals, text_coords, vertex_count);
 
     ll_aligned_free_32(indices_u32);
 
diff --git a/indra/newview/llmodelpreview.cpp b/indra/newview/llmodelpreview.cpp
index ef791fd80de7a24a2e11e723d57bf02acb9aebc1..4ec2d82350a48513ae2bcfb1b6888cf26127ef4b 100644
--- a/indra/newview/llmodelpreview.cpp
+++ b/indra/newview/llmodelpreview.cpp
@@ -1221,7 +1221,7 @@ void LLModelPreview::restoreNormals()
 // Runs per object, but likely it is a better way to run per model+submodels
 // returns a ratio of base model indices to resulting indices
 // returns -1 in case of failure
-F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *target_model, F32 indices_decimator, F32 error_threshold, bool sloppy)
+F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *target_model, F32 indices_decimator, F32 error_threshold, eSimplificationMode simplification_mode)
 {
     // I. Weld faces together
     // Figure out buffer size
@@ -1258,20 +1258,21 @@ F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *targe
     {
         const LLVolumeFace &face = base_model->getVolumeFace(face_idx);
 
-        // vertices
+        // Vertices
         S32 copy_bytes = face.mNumVertices * sizeof(LLVector4a);
         LLVector4a::memcpyNonAliased16((F32*)(combined_positions + combined_positions_shift), (F32*)face.mPositions, copy_bytes);
 
-        // normals
+        // Normals
         LLVector4a::memcpyNonAliased16((F32*)(combined_normals + combined_positions_shift), (F32*)face.mNormals, copy_bytes);
 
-        // tex coords
+        // Tex coords
         copy_bytes = face.mNumVertices * sizeof(LLVector2);
         memcpy((void*)(combined_tex_coords + combined_positions_shift), (void*)face.mTexCoords, copy_bytes);
 
         combined_positions_shift += face.mNumVertices;
 
-        // indices, sadly can't do dumb memcpy for indices, need to adjust each value
+        // Indices
+        // Sadly can't do a plain memcpy for indices; each value needs to be adjusted by the vertex shift
         for (S32 i = 0; i < face.mNumIndices; ++i)
         {
             U16 idx = face.mIndices[i];
@@ -1282,38 +1283,42 @@ F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *targe
         indices_idx_shift += face.mNumVertices;
     }
 
-    // II. Remap.
-    std::vector<unsigned int> remap(size_indices);
-    S32 size_remap_vertices = LLMeshOptimizer::generateRemapMultiU32(&remap[0],
-        combined_indices,
-        size_indices,
-        combined_positions,
-        combined_normals,
-        combined_tex_coords,
-        size_vertices);
-
-    // Allocate new buffers
-    U32* remap_indices = (U32*)ll_aligned_malloc_32(size_indices * sizeof(U32));
+    // II. Generate a shadow buffer if necessary.
+    // Welds vertices together where possible
 
-    S32 remap_tc_bytes_size = ((size_remap_vertices * sizeof(LLVector2)) + 0xF) & ~0xF;
-    LLVector4a* remap_positions = (LLVector4a*)ll_aligned_malloc<64>(sizeof(LLVector4a) * 2 * size_remap_vertices + remap_tc_bytes_size);
-    LLVector4a* remap_normals = remap_positions + size_remap_vertices;
-    LLVector2* remap_tex_coords = (LLVector2*)(remap_normals + size_remap_vertices);
-
-    // fill the buffers
-    LLMeshOptimizer::remapIndexBufferU32(remap_indices, combined_indices, size_indices, &remap[0]);
-    LLMeshOptimizer::remapPositionsBuffer(remap_positions, combined_positions, size_vertices, &remap[0]);
-    LLMeshOptimizer::remapNormalsBuffer(remap_normals, combined_normals, size_vertices, &remap[0]);
-    LLMeshOptimizer::remapUVBuffer(remap_tex_coords, combined_tex_coords, size_vertices, &remap[0]);
+    U32* shadow_indices = NULL;
+    // For MESH_OPTIMIZER_FULL, leave the indices as is: generateShadowIndexBufferU32
+    // won't produce anything new, since the model was already remapped on a per-face basis.
+    // The same goes for MESH_OPTIMIZER_NO_TOPOLOGY, where a shadow buffer is pointless
+    // because 'simplifySloppy' ignores all topology, including normals and UVs.
+    // Note: simplifySloppy can affect UVs significantly.
+    if (simplification_mode == MESH_OPTIMIZER_NO_NORMALS)
+    {
+        // Strip normals; reflections should restore relatively correctly
+        shadow_indices = (U32*)ll_aligned_malloc_32(size_indices * sizeof(U32));
+        LLMeshOptimizer::generateShadowIndexBufferU32(shadow_indices, combined_indices, size_indices, combined_positions, NULL, combined_tex_coords, size_vertices);
+    }
+    if (simplification_mode == MESH_OPTIMIZER_NO_UVS)
+    {
+        // Strip UVs; this can heavily affect textures
+        shadow_indices = (U32*)ll_aligned_malloc_32(size_indices * sizeof(U32));
+        LLMeshOptimizer::generateShadowIndexBufferU32(shadow_indices, combined_indices, size_indices, combined_positions, NULL, NULL, size_vertices);
+    }
 
-    // free unused buffers
-    ll_aligned_free<64>(combined_positions);
-    ll_aligned_free_32(combined_indices);
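+    // Simplify against the shadow index buffer when one was generated,
+    // otherwise against the combined indices as-is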
+    U32* source_indices = NULL;
+    if (shadow_indices)
+    {
+        source_indices = shadow_indices;
+    }
+    else
+    {
+        source_indices = combined_indices;
+    }
 
     // III. Simplify
     S32 target_indices = 0;
     F32 result_error = 0; // how far from original the model is, 1 == 100%
-    S32 new_indices = 0;
+    S32 size_new_indices = 0;
 
     if (indices_decimator > 0)
     {
@@ -1324,32 +1329,36 @@ F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *targe
         target_indices = 3;
     }
 
-    new_indices = LLMeshOptimizer::simplifyU32(
+    size_new_indices = LLMeshOptimizer::simplifyU32(
         output_indices,
-        remap_indices,
+        source_indices,
         size_indices,
-        remap_positions,
-        size_remap_vertices,
+        combined_positions,
+        size_vertices,
         LLVertexBuffer::sTypeSize[LLVertexBuffer::TYPE_VERTEX],
         target_indices,
         error_threshold,
-        sloppy,
+        simplification_mode == MESH_OPTIMIZER_NO_TOPOLOGY,
         &result_error);
 
     if (result_error < 0)
     {
         LL_WARNS() << "Negative result error from meshoptimizer for model " << target_model->mLabel
             << " target Indices: " << target_indices
-            << " new Indices: " << new_indices
+            << " new Indices: " << size_new_indices
             << " original count: " << size_indices << LL_ENDL;
     }
 
-    ll_aligned_free_32(remap_indices);
+    // Free unused buffers
+    ll_aligned_free_32(combined_indices);
+    ll_aligned_free_32(shadow_indices);
+    combined_indices = NULL;
+    shadow_indices = NULL;
 
-    if (new_indices < 3)
+    if (size_new_indices < 3)
     {
         // Model should have at least one visible triangle
-        ll_aligned_free<64>(remap_positions);
+        ll_aligned_free<64>(combined_positions);
         ll_aligned_free_32(output_indices);
 
         return -1;
@@ -1357,12 +1366,12 @@ F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *targe
 
     // IV. Repack back into individual faces
 
-    LLVector4a* buffer_positions = (LLVector4a*)ll_aligned_malloc<64>(sizeof(LLVector4a) * 2 * size_remap_vertices + tc_bytes_size);
-    LLVector4a* buffer_normals = buffer_positions + size_remap_vertices;
-    LLVector2* buffer_tex_coords = (LLVector2*)(buffer_normals + size_remap_vertices);
+    LLVector4a* buffer_positions = (LLVector4a*)ll_aligned_malloc<64>(sizeof(LLVector4a) * 2 * size_vertices + tc_bytes_size);
+    LLVector4a* buffer_normals = buffer_positions + size_vertices;
+    LLVector2* buffer_tex_coords = (LLVector2*)(buffer_normals + size_vertices);
     S32 buffer_idx_size = (size_indices * sizeof(U16) + 0xF) & ~0xF;
     U16* buffer_indices = (U16*)ll_aligned_malloc_16(buffer_idx_size);
-    S32* old_to_new_positions_map = new S32[size_remap_vertices];
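+    // Maps a vertex index in the combined buffer to its index in the per-face buffer (-1 = not copied yet)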
+    S32* old_to_new_positions_map = new S32[size_vertices];
 
     S32 buf_positions_copied = 0;
     S32 buf_indices_copied = 0;
@@ -1380,13 +1389,13 @@ F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *targe
         bool copy_triangle = false;
         S32 range = indices_idx_shift + face.mNumVertices;
 
-        for (S32 i = 0; i < size_remap_vertices; i++)
+        for (S32 i = 0; i < size_vertices; i++)
         {
             old_to_new_positions_map[i] = -1;
         }
 
         // Copy relevant indices and vertices
-        for (S32 i = 0; i < new_indices; ++i)
+        for (S32 i = 0; i < size_new_indices; ++i)
         {
             U32 idx = output_indices[i];
 
@@ -1409,19 +1418,19 @@ F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *targe
                         LL_WARNS() << "Over triangle limit. Failed to optimize in 'per object' mode, falling back to per face variant for"
                             << " model " << target_model->mLabel
                             << " target Indices: " << target_indices
-                            << " new Indices: " << new_indices
+                            << " new Indices: " << size_new_indices
                             << " original count: " << size_indices
                             << " error treshold: " << error_threshold
                             << LL_ENDL;
 
                         // U16 vertices overflow shouldn't happen, but just in case
-                        new_indices = 0;
+                        size_new_indices = 0;
                         valid_faces = 0;
                         for (U32 face_idx = 0; face_idx < base_model->getNumVolumeFaces(); ++face_idx)
                         {
-                            genMeshOptimizerPerFace(base_model, target_model, face_idx, indices_decimator, error_threshold, false);
+                            genMeshOptimizerPerFace(base_model, target_model, face_idx, indices_decimator, error_threshold, simplification_mode);
                             const LLVolumeFace &face = target_model->getVolumeFace(face_idx);
-                            new_indices += face.mNumIndices;
+                            size_new_indices += face.mNumIndices;
                             if (face.mNumIndices >= 3)
                             {
                                 valid_faces++;
@@ -1429,7 +1438,7 @@ F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *targe
                         }
                         if (valid_faces)
                         {
-                            return (F32)size_indices / (F32)new_indices;
+                            return (F32)size_indices / (F32)size_new_indices;
                         }
                         else
                         {
@@ -1438,9 +1447,9 @@ F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *targe
                     }
 
                     // Copy vertice, normals, tcs
-                    buffer_positions[buf_positions_copied] = remap_positions[idx];
-                    buffer_normals[buf_positions_copied] = remap_normals[idx];
-                    buffer_tex_coords[buf_positions_copied] = remap_tex_coords[idx];
+                    buffer_positions[buf_positions_copied] = combined_positions[idx];
+                    buffer_normals[buf_positions_copied] = combined_normals[idx];
+                    buffer_tex_coords[buf_positions_copied] = combined_tex_coords[idx];
 
                     old_to_new_positions_map[idx] = buf_positions_copied;
 
@@ -1495,21 +1504,21 @@ F32 LLModelPreview::genMeshOptimizerPerModel(LLModel *base_model, LLModel *targe
     }
 
     delete[]old_to_new_positions_map;
-    ll_aligned_free<64>(remap_positions);
+    ll_aligned_free<64>(combined_positions);
     ll_aligned_free<64>(buffer_positions);
     ll_aligned_free_32(output_indices);
     ll_aligned_free_16(buffer_indices);
 
-    if (new_indices < 3 || valid_faces == 0)
+    if (size_new_indices < 3 || valid_faces == 0)
     {
         // Model should have at least one visible triangle
         return -1;
     }
 
-    return (F32)size_indices / (F32)new_indices;
+    return (F32)size_indices / (F32)size_new_indices;
 }
 
-F32 LLModelPreview::genMeshOptimizerPerFace(LLModel *base_model, LLModel *target_model, U32 face_idx, F32 indices_decimator, F32 error_threshold, bool sloppy)
+F32 LLModelPreview::genMeshOptimizerPerFace(LLModel *base_model, LLModel *target_model, U32 face_idx, F32 indices_decimator, F32 error_threshold, eSimplificationMode simplification_mode)
 {
     const LLVolumeFace &face = base_model->getVolumeFace(face_idx);
     S32 size_indices = face.mNumIndices;
@@ -1521,9 +1530,36 @@ F32 LLModelPreview::genMeshOptimizerPerFace(LLModel *base_model, LLModel *target
     S32 size = (size_indices * sizeof(U16) + 0xF) & ~0xF;
     U16* output_indices = (U16*)ll_aligned_malloc_16(size);
 
+    U16* shadow_indices = NULL;
+    // For MESH_OPTIMIZER_FULL, leave the indices as is: generateShadowIndexBufferU16
+    // won't produce anything new, since the model was already remapped on a per-face basis.
+    // The same goes for MESH_OPTIMIZER_NO_TOPOLOGY, where a shadow buffer is pointless
+    // because 'simplifySloppy' ignores all topology, including normals and UVs.
+    if (simplification_mode == MESH_OPTIMIZER_NO_NORMALS)
+    {
+        shadow_indices = (U16*)ll_aligned_malloc_16(size);
+        LLMeshOptimizer::generateShadowIndexBufferU16(shadow_indices, face.mIndices, size_indices, face.mPositions, NULL, face.mTexCoords, face.mNumVertices);
+    }
+    if (simplification_mode == MESH_OPTIMIZER_NO_UVS)
+    {
+        shadow_indices = (U16*)ll_aligned_malloc_16(size);
+        LLMeshOptimizer::generateShadowIndexBufferU16(shadow_indices, face.mIndices, size_indices, face.mPositions, NULL, NULL, face.mNumVertices);
+    }
+    // Don't run ShadowIndexBuffer for MESH_OPTIMIZER_NO_TOPOLOGY, it's pointless
+
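+    // Simplify against the shadow index buffer when one was generated,
+    // otherwise against the face's own indices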
+    U16* source_indices = NULL;
+    if (shadow_indices)
+    {
+        source_indices = shadow_indices;
+    }
+    else
+    {
+        source_indices = face.mIndices;
+    }
+
     S32 target_indices = 0;
     F32 result_error = 0; // how far from original the model is, 1 == 100%
-    S32 new_indices = 0;
+    S32 size_new_indices = 0;
 
     if (indices_decimator > 0)
     {
@@ -1534,16 +1570,16 @@ F32 LLModelPreview::genMeshOptimizerPerFace(LLModel *base_model, LLModel *target
         target_indices = 3;
     }
 
-    new_indices = LLMeshOptimizer::simplify(
+    size_new_indices = LLMeshOptimizer::simplify(
         output_indices,
-        face.mIndices,
+        source_indices,
         size_indices,
         face.mPositions,
         face.mNumVertices,
         LLVertexBuffer::sTypeSize[LLVertexBuffer::TYPE_VERTEX],
         target_indices,
         error_threshold,
-        sloppy,
+        simplification_mode == MESH_OPTIMIZER_NO_TOPOLOGY,
         &result_error);
 
     if (result_error < 0)
@@ -1551,7 +1587,7 @@ F32 LLModelPreview::genMeshOptimizerPerFace(LLModel *base_model, LLModel *target
         LL_WARNS() << "Negative result error from meshoptimizer for face " << face_idx
             << " of model " << target_model->mLabel
             << " target Indices: " << target_indices
-            << " new Indices: " << new_indices
+            << " new Indices: " << size_new_indices
             << " original count: " << size_indices
             << " error treshold: " << error_threshold
             << LL_ENDL;
@@ -1562,9 +1598,9 @@ F32 LLModelPreview::genMeshOptimizerPerFace(LLModel *base_model, LLModel *target
     // Copy old values
     new_face = face;
 
-    if (new_indices < 3)
+    if (size_new_indices < 3)
     {
-        if (!sloppy)
+        if (simplification_mode != MESH_OPTIMIZER_NO_TOPOLOGY)
         {
             // meshopt_optimizeSloppy() can optimize triangles away even if target_indices is > 2,
             // but optimize() isn't supposed to
@@ -1588,23 +1624,24 @@ F32 LLModelPreview::genMeshOptimizerPerFace(LLModel *base_model, LLModel *target
     else
     {
         // Assign new values
-        new_face.resizeIndices(new_indices); // will wipe out mIndices, so new_face can't substitute output
-        S32 idx_size = (new_indices * sizeof(U16) + 0xF) & ~0xF;
+        new_face.resizeIndices(size_new_indices); // will wipe out mIndices, so new_face can't substitute output
+        S32 idx_size = (size_new_indices * sizeof(U16) + 0xF) & ~0xF;
         LLVector4a::memcpyNonAliased16((F32*)new_face.mIndices, (F32*)output_indices, idx_size);
 
-        // clear unused values
+        // Clear unused values
         new_face.optimize();
     }
 
     ll_aligned_free_16(output_indices);
+    ll_aligned_free_16(shadow_indices);
      
-    if (new_indices < 3)
+    if (size_new_indices < 3)
     {
         // At least one triangle is needed
         return -1;
     }
 
-    return (F32)size_indices / (F32)new_indices;
+    return (F32)size_indices / (F32)size_new_indices;
 }
 
 void LLModelPreview::genMeshOptimizerLODs(S32 which_lod, S32 meshopt_mode, U32 decimation, bool enforce_tri_limit)
@@ -1740,14 +1777,17 @@ void LLModelPreview::genMeshOptimizerLODs(S32 which_lod, S32 meshopt_mode, U32 d
             // but combine all submodels with origin model as well
             if (model_meshopt_mode == MESH_OPTIMIZER_PRECISE)
             {
-                // Run meshoptimizer for each model/object, up to 8 faces in one model.
-
-                // Ideally this should run not per model,
-                // but combine all submodels with origin model as well
-                F32 res = genMeshOptimizerPerModel(base, target_model, indices_decimator, lod_error_threshold, false);
-                if (res < 0)
+                // Run meshoptimizer for each face
+                for (U32 face_idx = 0; face_idx < base->getNumVolumeFaces(); ++face_idx)
                 {
-                    target_model->copyVolumeFaces(base);
+                    F32 res = genMeshOptimizerPerFace(base, target_model, face_idx, indices_decimator, lod_error_threshold, MESH_OPTIMIZER_FULL);
+                    if (res < 0)
+                    {
+                        // Mesh optimizer failed and returned an invalid model
+                        const LLVolumeFace &face = base->getVolumeFace(face_idx);
+                        LLVolumeFace &new_face = target_model->getVolumeFace(face_idx);
+                        new_face = face;
+                    }
                 }
             }
 
@@ -1756,19 +1796,29 @@ void LLModelPreview::genMeshOptimizerLODs(S32 which_lod, S32 meshopt_mode, U32 d
                 // Run meshoptimizer for each face
                 for (U32 face_idx = 0; face_idx < base->getNumVolumeFaces(); ++face_idx)
                 {
-                    if (genMeshOptimizerPerFace(base, target_model, face_idx, indices_decimator, lod_error_threshold, true) < 0)
+                    if (genMeshOptimizerPerFace(base, target_model, face_idx, indices_decimator, lod_error_threshold, MESH_OPTIMIZER_NO_TOPOLOGY) < 0)
                     {
                         // Sloppy failed and returned an invalid model
-                        genMeshOptimizerPerFace(base, target_model, face_idx, indices_decimator, lod_error_threshold, false);
+                        genMeshOptimizerPerFace(base, target_model, face_idx, indices_decimator, lod_error_threshold, MESH_OPTIMIZER_FULL);
                     }
                 }
             }
 
             if (model_meshopt_mode == MESH_OPTIMIZER_AUTO)
             {
-                // Switches between 'combine' method and 'sloppy' based on combine's result.
-                F32 allowed_ratio_drift = 2.f;
-                F32 precise_ratio = genMeshOptimizerPerModel(base, target_model, indices_decimator, lod_error_threshold, false);
+                // Remove progressively more data if we can't reach the target.
+                F32 allowed_ratio_drift = 1.8f;
+                F32 precise_ratio = genMeshOptimizerPerModel(base, target_model, indices_decimator, lod_error_threshold, MESH_OPTIMIZER_FULL);
+
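+                // Retry with progressively looser welding (drop normals, then UVs as well)
+                // whenever the previous pass failed or missed the target ratio by more than the allowed drift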
+                if (precise_ratio < 0 || (precise_ratio * allowed_ratio_drift < indices_decimator))
+                {
+                    precise_ratio = genMeshOptimizerPerModel(base, target_model, indices_decimator, lod_error_threshold, MESH_OPTIMIZER_NO_NORMALS);
+                }
+
+                if (precise_ratio < 0 || (precise_ratio * allowed_ratio_drift < indices_decimator))
+                {
+                    precise_ratio = genMeshOptimizerPerModel(base, target_model, indices_decimator, lod_error_threshold, MESH_OPTIMIZER_NO_UVS);
+                }
                 
                 if (precise_ratio < 0 || (precise_ratio * allowed_ratio_drift < indices_decimator))
                 {
@@ -1776,10 +1826,11 @@ void LLModelPreview::genMeshOptimizerLODs(S32 which_lod, S32 meshopt_mode, U32 d
                     // Sloppy variant can fail entirely and has issues with precision,
                     // so code needs to do multiple attempts with different decimators.
                     // Todo: this is a bit of a mess, needs to be refined and improved
+
                     F32 last_working_decimator = 0.f;
                     F32 last_working_ratio = F32_MAX;
 
-                    F32 sloppy_ratio = genMeshOptimizerPerModel(base, target_model, indices_decimator, lod_error_threshold, true);
+                    F32 sloppy_ratio = genMeshOptimizerPerModel(base, target_model, indices_decimator, lod_error_threshold, MESH_OPTIMIZER_NO_TOPOLOGY);
 
                     if (sloppy_ratio > 0)
                     {
@@ -1802,13 +1853,13 @@ void LLModelPreview::genMeshOptimizerLODs(S32 which_lod, S32 meshopt_mode, U32 d
                         // side due to overal lack of precision, and we don't need an ideal result, which
                         // likely does not exist, just a better one, so a partial correction is enough.
                         F32 sloppy_decimator = indices_decimator * (indices_decimator / sloppy_ratio + 1) / 2;
-                        sloppy_ratio = genMeshOptimizerPerModel(base, target_model, sloppy_decimator, lod_error_threshold, true);
+                        sloppy_ratio = genMeshOptimizerPerModel(base, target_model, sloppy_decimator, lod_error_threshold, MESH_OPTIMIZER_NO_TOPOLOGY);
                     }
 
                     if (last_working_decimator > 0 && sloppy_ratio < last_working_ratio)
                     {
                         // Compensation didn't work, return back to previous decimator
-                        sloppy_ratio = genMeshOptimizerPerModel(base, target_model, indices_decimator, lod_error_threshold, true);
+                        sloppy_ratio = genMeshOptimizerPerModel(base, target_model, indices_decimator, lod_error_threshold, MESH_OPTIMIZER_NO_TOPOLOGY);
                     }
 
                     if (sloppy_ratio < 0)
@@ -1841,7 +1892,7 @@ void LLModelPreview::genMeshOptimizerLODs(S32 which_lod, S32 meshopt_mode, U32 d
                                 && sloppy_decimator > precise_ratio
                                 && sloppy_decimator > 1)// precise_ratio isn't supposed to be below 1, but check just in case
                             {
-                                sloppy_ratio = genMeshOptimizerPerModel(base, target_model, sloppy_decimator, lod_error_threshold, true);
+                                sloppy_ratio = genMeshOptimizerPerModel(base, target_model, sloppy_decimator, lod_error_threshold, MESH_OPTIMIZER_NO_TOPOLOGY);
                                 sloppy_decimator = sloppy_decimator / sloppy_decimation_step;
                             }
                         }
@@ -1861,7 +1912,7 @@ void LLModelPreview::genMeshOptimizerLODs(S32 which_lod, S32 meshopt_mode, U32 d
                         else
                         {
                             // Fallback to normal method
-                            precise_ratio = genMeshOptimizerPerModel(base, target_model, indices_decimator, lod_error_threshold, false);
+                            precise_ratio = genMeshOptimizerPerModel(base, target_model, indices_decimator, lod_error_threshold, MESH_OPTIMIZER_FULL);
                         }
 
                         LL_INFOS() << "Model " << target_model->getName()
diff --git a/indra/newview/llmodelpreview.h b/indra/newview/llmodelpreview.h
index 727fe7937325cdea36aa94f46f178bbd588d0e7b..215f44357fb89d8982933856ece30c0020035065 100644
--- a/indra/newview/llmodelpreview.h
+++ b/indra/newview/llmodelpreview.h
@@ -225,13 +225,21 @@ class LLModelPreview : public LLViewerDynamicTexture, public LLMutex
     // Count amount of original models, excluding sub-models
     static U32 countRootModels(LLModelLoader::model_list models);
 
+    typedef enum
+    {
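+        // MESH_OPTIMIZER_FULL        - simplify without discarding any vertex attributes
+        // MESH_OPTIMIZER_NO_NORMALS  - allow welding vertices that differ only by normal
+        // MESH_OPTIMIZER_NO_UVS      - allow welding vertices that differ by normal or UV
+        // MESH_OPTIMIZER_NO_TOPOLOGY - sloppy simplification that ignores topology entirely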
+        MESH_OPTIMIZER_FULL,
+        MESH_OPTIMIZER_NO_NORMALS,
+        MESH_OPTIMIZER_NO_UVS,
+        MESH_OPTIMIZER_NO_TOPOLOGY,
+    } eSimplificationMode;
+
     // Merges faces into single mesh, simplifies using mesh optimizer,
     // then splits back into faces.
     // Returns reached simplification ratio. -1 in case of a failure.
-    F32 genMeshOptimizerPerModel(LLModel *base_model, LLModel *target_model, F32 indices_ratio, F32 error_threshold, bool sloppy);
+    F32 genMeshOptimizerPerModel(LLModel *base_model, LLModel *target_model, F32 indices_ratio, F32 error_threshold, eSimplificationMode simplification_mode);
     // Simplifies specified face using mesh optimizer.
     // Returns reached simplification ratio. -1 in case of a failure.
-    F32 genMeshOptimizerPerFace(LLModel *base_model, LLModel *target_model, U32 face_idx, F32 indices_ratio, F32 error_threshold, bool sloppy);
+    F32 genMeshOptimizerPerFace(LLModel *base_model, LLModel *target_model, U32 face_idx, F32 indices_ratio, F32 error_threshold, eSimplificationMode simplification_mode);
 
 protected:
     friend class LLModelLoader;