From e79a88c8ccfadcd260892000d4dec2ae921b26de Mon Sep 17 00:00:00 2001
From: Monty Brandenberg <monty@lindenlab.com>
Date: Tue, 12 Aug 2014 18:21:26 -0400
Subject: [PATCH] Better support for dynamic option changes in llcorehttp.
 Libcurl has problems disabling pipelining on a multi handle with outstanding
 requests, so build a more conservative scheme that lets requests drain before
 setting curl multi options.  Would rather not need this, but it is
 significantly safer.  The "HttpPipelining" debug setting is now fully
 dynamic; connection limits can also be made dynamic in the near future.
 Upped the default connection count back to 8 for now but will revisit this
 during the tuning phase.  It might be time to combine mesh and textures into
 a single asset class: for normal server operation that would be a clear win,
 but for a server under load the current scheme may be better.  Minor cleanup
 in logging to eliminate some redundant strings.  Might add more tracing to
 the stall logic 'just in case'.
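
For reference, a minimal standalone sketch (assumed names, not viewer code;
PolicyState and apply_when_quiet are invented for the example) of the
drain-then-reconfigure pattern this change adopts: multi options are pushed
to libcurl only once a policy class has no running requests; otherwise the
class is marked dirty and stalled until it drains.

    #include <curl/curl.h>

    struct PolicyState
    {
        CURLM * multi;           // one multi handle per policy class
        bool    dirty;           // options changed while requests were active
        long    pipeline_depth;  // 0/1 disables pipelining, >1 enables it
        long    total_conns;     // CURLMOPT_MAX_TOTAL_CONNECTIONS value
    };

    // Apply options only when the class has drained; otherwise mark it
    // dirty so the caller stops feeding it requests until it goes quiet.
    void apply_when_quiet(PolicyState & ps, int still_running)
    {
        if (still_running > 0)
        {
            ps.dirty = true;     // caller stalls the ready queue for this class
            return;
        }
        curl_multi_setopt(ps.multi, CURLMOPT_PIPELINING,
                          ps.pipeline_depth > 1 ? 1L : 0L);
        curl_multi_setopt(ps.multi, CURLMOPT_MAX_PIPELINE_LENGTH,
                          ps.pipeline_depth > 1 ? ps.pipeline_depth : 0L);
        curl_multi_setopt(ps.multi, CURLMOPT_MAX_TOTAL_CONNECTIONS,
                          ps.total_conns);
        ps.dirty = false;        // caller may unstall the class now
    }

In the patch itself this lives in HttpLibcurl::policyUpdated(), with the
per-class mDirtyPolicy flag and HttpPolicy::stallPolicy() providing the drain.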

---
 indra/llcorehttp/_httplibcurl.cpp       | 198 ++++++++++++++++--------
 indra/llcorehttp/_httplibcurl.h         |  12 +-
 indra/llcorehttp/_httpoperation.cpp     |  28 ++--
 indra/llcorehttp/_httpoprequest.cpp     |  46 +++---
 indra/llcorehttp/_httppolicy.cpp        |  83 ++++++----
 indra/llcorehttp/_httppolicy.h          |   8 +
 indra/llcorehttp/_httpservice.cpp       |  30 ++--
 indra/newview/app_settings/settings.xml |   4 +-
 indra/newview/llappcorehttp.cpp         | 174 ++++++++++++---------
 indra/newview/llappcorehttp.h           |   1 +
 indra/newview/lltexturefetch.cpp        |  55 ++++---
 indra/newview/lltexturefetch.h          |  10 +-
 12 files changed, 421 insertions(+), 228 deletions(-)

diff --git a/indra/llcorehttp/_httplibcurl.cpp b/indra/llcorehttp/_httplibcurl.cpp
index fb907f6318e..b46833a1f39 100755
--- a/indra/llcorehttp/_httplibcurl.cpp
+++ b/indra/llcorehttp/_httplibcurl.cpp
@@ -40,6 +40,8 @@ namespace
 void check_curl_multi_code(CURLMcode code);
 void check_curl_multi_code(CURLMcode code, int curl_setopt_option);
 
+static const char * const LOG_CORE("CoreHttp");
+
 } // end anonymous namespace
 
 
@@ -51,7 +53,8 @@ HttpLibcurl::HttpLibcurl(HttpService * service)
 	: mService(service),
 	  mPolicyCount(0),
 	  mMultiHandles(NULL),
-	  mActiveHandles(NULL)
+	  mActiveHandles(NULL),
+	  mDirtyPolicy(NULL)
 {}
 
 
@@ -90,6 +93,9 @@ void HttpLibcurl::shutdown()
 
 		delete [] mActiveHandles;
 		mActiveHandles = NULL;
+
+		delete [] mDirtyPolicy;
+		mDirtyPolicy = NULL;
 	}
 
 	mPolicyCount = 0;
@@ -101,44 +107,21 @@ void HttpLibcurl::start(int policy_count)
 	llassert_always(policy_count <= HTTP_POLICY_CLASS_LIMIT);
 	llassert_always(! mMultiHandles);					// One-time call only
 	
-	HttpPolicy & policy(mService->getPolicy());
 	mPolicyCount = policy_count;
 	mMultiHandles = new CURLM * [mPolicyCount];
 	mActiveHandles = new int [mPolicyCount];
+	mDirtyPolicy = new bool [mPolicyCount];
 	
 	for (int policy_class(0); policy_class < mPolicyCount; ++policy_class)
 	{
-		HttpPolicyClass & options(policy.getClassOptions(policy_class));
-
-		mActiveHandles[policy_class] = 0;
 		if (NULL == (mMultiHandles[policy_class] = curl_multi_init()))
 		{
-			LL_ERRS("CoreHttp") << "Failed to allocate multi handle in libcurl."
-								<< LL_ENDL;
-		}
-				
-		if (options.mPipelining > 1)
-		{
-			CURLMcode code;
-			
-			// We'll try to do pipelining on this multihandle
-			code = curl_multi_setopt(mMultiHandles[policy_class],
-									 CURLMOPT_PIPELINING,
-									 1L);
-			check_curl_multi_code(code, CURLMOPT_PIPELINING);
-			code = curl_multi_setopt(mMultiHandles[policy_class],
-									 CURLMOPT_MAX_PIPELINE_LENGTH,
-									 long(options.mPipelining));
-			check_curl_multi_code(code, CURLMOPT_MAX_PIPELINE_LENGTH);
-			code = curl_multi_setopt(mMultiHandles[policy_class],
-									 CURLMOPT_MAX_HOST_CONNECTIONS,
-									 long(options.mPerHostConnectionLimit));
-			check_curl_multi_code(code, CURLMOPT_MAX_HOST_CONNECTIONS);
-			code = curl_multi_setopt(mMultiHandles[policy_class],
-									 CURLMOPT_MAX_TOTAL_CONNECTIONS,
-									 long(options.mConnectionLimit));
-			check_curl_multi_code(code, CURLMOPT_MAX_TOTAL_CONNECTIONS);
+			LL_ERRS(LOG_CORE) << "Failed to allocate multi handle in libcurl."
+							  << LL_ENDL;
 		}
+		mActiveHandles[policy_class] = 0;
+		mDirtyPolicy[policy_class] = false;
+		policyUpdated(policy_class);
 	}
 }
 
@@ -156,8 +139,19 @@ HttpService::ELoopSpeed HttpLibcurl::processTransport()
 	// Give libcurl some cycles to do I/O & callbacks
 	for (int policy_class(0); policy_class < mPolicyCount; ++policy_class)
 	{
-		if (! mActiveHandles[policy_class] || ! mMultiHandles[policy_class])
+		if (! mMultiHandles[policy_class])
 		{
+			// No handle, nothing to do.
+			continue;
+		}
+		if (! mActiveHandles[policy_class])
+		{
+			// If we've gone quiet and there's a dirty update, apply it,
+			// otherwise we're done.
+			if (mDirtyPolicy[policy_class])
+			{
+				policyUpdated(policy_class);
+			}
 			continue;
 		}
 		
@@ -192,9 +186,9 @@ HttpService::ELoopSpeed HttpLibcurl::processTransport()
 			}
 			else
 			{
-				LL_WARNS_ONCE("CoreHttp") << "Unexpected message from libcurl.  Msg code:  "
-										  << msg->msg
-										  << LL_ENDL;
+				LL_WARNS_ONCE(LOG_CORE) << "Unexpected message from libcurl.  Msg code:  "
+										<< msg->msg
+										<< LL_ENDL;
 			}
 			msgs_in_queue = 0;
 		}
@@ -230,11 +224,11 @@ void HttpLibcurl::addOp(HttpOpRequest * op)
 	{
 		HttpPolicy & policy(mService->getPolicy());
 		
-		LL_INFOS("CoreHttp") << "TRACE, ToActiveQueue, Handle:  "
-							 << static_cast<HttpHandle>(op)
-							 << ", Actives:  " << mActiveOps.size()
-							 << ", Readies:  " << policy.getReadyCount(op->mReqPolicy)
-							 << LL_ENDL;
+		LL_INFOS(LOG_CORE) << "TRACE, ToActiveQueue, Handle:  "
+						   << static_cast<HttpHandle>(op)
+						   << ", Actives:  " << mActiveOps.size()
+						   << ", Readies:  " << policy.getReadyCount(op->mReqPolicy)
+						   << LL_ENDL;
 	}
 	
 	// On success, make operation active
@@ -286,10 +280,10 @@ void HttpLibcurl::cancelRequest(HttpOpRequest * op)
 	// Tracing
 	if (op->mTracing > HTTP_TRACE_OFF)
 	{
-		LL_INFOS("CoreHttp") << "TRACE, RequestCanceled, Handle:  "
-							 << static_cast<HttpHandle>(op)
-							 << ", Status:  " << op->mStatus.toTerseString()
-							 << LL_ENDL;
+		LL_INFOS(LOG_CORE) << "TRACE, RequestCanceled, Handle:  "
+						   << static_cast<HttpHandle>(op)
+						   << ", Status:  " << op->mStatus.toTerseString()
+						   << LL_ENDL;
 	}
 
 	// Cancel op and deliver for notification
@@ -306,18 +300,18 @@ bool HttpLibcurl::completeRequest(CURLM * multi_handle, CURL * handle, CURLcode
 
 	if (handle != op->mCurlHandle || ! op->mCurlActive)
 	{
-		LL_WARNS("CoreHttp") << "libcurl handle and HttpOpRequest handle in disagreement or inactive request."
-							 << "  Handle:  " << static_cast<HttpHandle>(handle)
-							 << LL_ENDL;
+		LL_WARNS(LOG_CORE) << "libcurl handle and HttpOpRequest handle in disagreement or inactive request."
+						   << "  Handle:  " << static_cast<HttpHandle>(handle)
+						   << LL_ENDL;
 		return false;
 	}
 
 	active_set_t::iterator it(mActiveOps.find(op));
 	if (mActiveOps.end() == it)
 	{
-		LL_WARNS("CoreHttp") << "libcurl completion for request not on active list.  Continuing."
-							 << "  Handle:  " << static_cast<HttpHandle>(handle)
-							 << LL_ENDL;
+		LL_WARNS(LOG_CORE) << "libcurl completion for request not on active list.  Continuing."
+						   << "  Handle:  " << static_cast<HttpHandle>(handle)
+						   << LL_ENDL;
 		return false;
 	}
 
@@ -348,9 +342,9 @@ bool HttpLibcurl::completeRequest(CURLM * multi_handle, CURL * handle, CURLcode
 		}
 		else
 		{
-			LL_WARNS("CoreHttp") << "Invalid HTTP response code ("
-								 << http_status << ") received from server."
-								 << LL_ENDL;
+			LL_WARNS(LOG_CORE) << "Invalid HTTP response code ("
+							   << http_status << ") received from server."
+							   << LL_ENDL;
 			op->mStatus = HttpStatus(HttpStatus::LLCORE, HE_INVALID_HTTP_STATUS);
 		}
 	}
@@ -363,10 +357,10 @@ bool HttpLibcurl::completeRequest(CURLM * multi_handle, CURL * handle, CURLcode
 	// Tracing
 	if (op->mTracing > HTTP_TRACE_OFF)
 	{
-		LL_INFOS("CoreHttp") << "TRACE, RequestComplete, Handle:  "
-							 << static_cast<HttpHandle>(op)
-							 << ", Status:  " << op->mStatus.toTerseString()
-							 << LL_ENDL;
+		LL_INFOS(LOG_CORE) << "TRACE, RequestComplete, Handle:  "
+						   << static_cast<HttpHandle>(op)
+						   << ", Status:  " << op->mStatus.toTerseString()
+						   << LL_ENDL;
 	}
 
 	// Dispatch to next stage
@@ -390,6 +384,88 @@ int HttpLibcurl::getActiveCountInClass(int policy_class) const
 	return mActiveHandles ? mActiveHandles[policy_class] : 0;
 }
 
+void HttpLibcurl::policyUpdated(int policy_class)
+{
+	if (policy_class < 0 || policy_class >= mPolicyCount || ! mMultiHandles)
+	{
+		return;
+	}
+	
+	HttpPolicy & policy(mService->getPolicy());
+	
+	if (! mActiveHandles[policy_class])
+	{
+		// Clear to set options.  As of libcurl 7.37.0, if a pipelining
+		// multi handle has active requests and you try to set the
+		// multi handle to non-pipelining, the library gets very angry
+		// and goes off the rails corrupting memory.  A clue that you're
+		// about to crash is that you'll get a missing server response
+		// error (curl code 9).  So, if options are to be set, we let
+		// the multi handle run out of requests, then set options, and
+		// re-enable request processing.
+		//
+		// All of this stall mechanism exists for this reason.  If
+		// libcurl becomes more resilient later, it should be possible
+		// to remove all of this.  The connection limit settings are fine,
+		// it's just that pipelined-to-non-pipelined transition that
+		// is fatal at the moment.
+		
+		HttpPolicyClass & options(policy.getClassOptions(policy_class));
+		CURLM * multi_handle(mMultiHandles[policy_class]);
+		CURLMcode code;
+
+		// Enable policy if stalled
+		policy.stallPolicy(policy_class, false);
+		mDirtyPolicy[policy_class] = false;
+		
+		if (options.mPipelining > 1)
+		{
+			// We'll try to do pipelining on this multihandle
+			code = curl_multi_setopt(multi_handle,
+									 CURLMOPT_PIPELINING,
+									 1L);
+			check_curl_multi_code(code, CURLMOPT_PIPELINING);
+			code = curl_multi_setopt(multi_handle,
+									 CURLMOPT_MAX_PIPELINE_LENGTH,
+									 long(options.mPipelining));
+			check_curl_multi_code(code, CURLMOPT_MAX_PIPELINE_LENGTH);
+			code = curl_multi_setopt(multi_handle,
+									 CURLMOPT_MAX_HOST_CONNECTIONS,
+									 long(options.mPerHostConnectionLimit));
+			check_curl_multi_code(code, CURLMOPT_MAX_HOST_CONNECTIONS);
+			code = curl_multi_setopt(multi_handle,
+									 CURLMOPT_MAX_TOTAL_CONNECTIONS,
+									 long(options.mConnectionLimit));
+			check_curl_multi_code(code, CURLMOPT_MAX_TOTAL_CONNECTIONS);
+		}
+		else
+		{
+			code = curl_multi_setopt(multi_handle,
+									 CURLMOPT_PIPELINING,
+									 0L);
+			check_curl_multi_code(code, CURLMOPT_PIPELINING);
+			code = curl_multi_setopt(multi_handle,
+									 CURLMOPT_MAX_HOST_CONNECTIONS,
+									 0L);
+			check_curl_multi_code(code, CURLMOPT_MAX_HOST_CONNECTIONS);
+			code = curl_multi_setopt(multi_handle,
+									 CURLMOPT_MAX_TOTAL_CONNECTIONS,
+									 long(options.mConnectionLimit));
+			check_curl_multi_code(code, CURLMOPT_MAX_TOTAL_CONNECTIONS);
+		}
+	}
+	else if (! mDirtyPolicy[policy_class])
+	{
+		// Mark policy dirty and request a stall in the policy.
+		// When policy goes idle, we'll re-invoke this method
+		// and perform the change.  Don't allow this thread to
+		// sleep while we're waiting for quiescence, we'll just
+		// stop processing.
+		mDirtyPolicy[policy_class] = true;
+		policy.stallPolicy(policy_class, true);
+	}
+}
+
 
 // ---------------------------------------
 // Free functions
@@ -424,9 +500,9 @@ void check_curl_multi_code(CURLMcode code, int curl_setopt_option)
 {
 	if (CURLM_OK != code)
 	{
-		LL_WARNS("CoreHttp") << "libcurl multi error detected:  " << curl_multi_strerror(code)
-							 << ", curl_multi_setopt option:  " << curl_setopt_option
-							 << LL_ENDL;
+		LL_WARNS(LOG_CORE) << "libcurl multi error detected:  " << curl_multi_strerror(code)
+						   << ", curl_multi_setopt option:  " << curl_setopt_option
+						   << LL_ENDL;
 	}
 }
 
@@ -435,8 +511,8 @@ void check_curl_multi_code(CURLMcode code)
 {
 	if (CURLM_OK != code)
 	{
-		LL_WARNS("CoreHttp") << "libcurl multi error detected:  " << curl_multi_strerror(code)
-							 << LL_ENDL;
+		LL_WARNS(LOG_CORE) << "libcurl multi error detected:  " << curl_multi_strerror(code)
+						   << LL_ENDL;
 	}
 }
 
diff --git a/indra/llcorehttp/_httplibcurl.h b/indra/llcorehttp/_httplibcurl.h
index 67f98dd4f07..2c7ad1fa8ed 100755
--- a/indra/llcorehttp/_httplibcurl.h
+++ b/indra/llcorehttp/_httplibcurl.h
@@ -4,7 +4,7 @@
  *
  * $LicenseInfo:firstyear=2012&license=viewerlgpl$
  * Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -116,6 +116,14 @@ class HttpLibcurl
 	/// Threading:  called by worker thread.
 	bool cancel(HttpHandle handle);
 
+	/// Informs the transport that a particular policy class has had
+	/// its options changed and that any transport state changes needed
+	/// to reflect those options should be applied.  Used mainly for
+	/// initialization and dynamic option setting.
+	///
+	/// Threading:  called by worker thread.
+	void policyUpdated(int policy_class);
+
 protected:
 	/// Invoked when libcurl has indicated a request has been processed
 	/// to completion and we need to move the request to a new state.
@@ -134,6 +142,8 @@ class HttpLibcurl
 	int					mPolicyCount;
 	CURLM **			mMultiHandles;			// One handle per policy class
 	int *				mActiveHandles;			// Active count per policy class
+	bool *				mDirtyPolicy;			// Dirty policy update waiting for stall (per pc)
+	
 }; // end class HttpLibcurl
 
 }  // end namespace LLCore
diff --git a/indra/llcorehttp/_httpoperation.cpp b/indra/llcorehttp/_httpoperation.cpp
index 5bb0654652f..fefe561f80e 100755
--- a/indra/llcorehttp/_httpoperation.cpp
+++ b/indra/llcorehttp/_httpoperation.cpp
@@ -4,7 +4,7 @@
  *
  * $LicenseInfo:firstyear=2012&license=viewerlgpl$
  * Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -38,6 +38,14 @@
 #include "lltimer.h"
 
 
+namespace
+{
+
+static const char * const LOG_CORE("CoreHttp");
+
+} // end anonymous namespace
+
+
 namespace LLCore
 {
 
@@ -94,8 +102,8 @@ void HttpOperation::stageFromRequest(HttpService *)
 	// Default implementation should never be called.  This
 	// indicates an operation making a transition that isn't
 	// defined.
-	LL_ERRS("CoreHttp") << "Default stageFromRequest method may not be called."
-						<< LL_ENDL;
+	LL_ERRS(LOG_CORE) << "Default stageFromRequest method may not be called."
+					  << LL_ENDL;
 }
 
 
@@ -104,8 +112,8 @@ void HttpOperation::stageFromReady(HttpService *)
 	// Default implementation should never be called.  This
 	// indicates an operation making a transition that isn't
 	// defined.
-	LL_ERRS("CoreHttp") << "Default stageFromReady method may not be called."
-						<< LL_ENDL;
+	LL_ERRS(LOG_CORE) << "Default stageFromReady method may not be called."
+					  << LL_ENDL;
 }
 
 
@@ -114,8 +122,8 @@ void HttpOperation::stageFromActive(HttpService *)
 	// Default implementation should never be called.  This
 	// indicates an operation making a transition that isn't
 	// defined.
-	LL_ERRS("CoreHttp") << "Default stageFromActive method may not be called."
-						<< LL_ENDL;
+	LL_ERRS(LOG_CORE) << "Default stageFromActive method may not be called."
+					  << LL_ENDL;
 }
 
 
@@ -145,9 +153,9 @@ void HttpOperation::addAsReply()
 {
 	if (mTracing > HTTP_TRACE_OFF)
 	{
-		LL_INFOS("CoreHttp") << "TRACE, ToReplyQueue, Handle:  "
-							 << static_cast<HttpHandle>(this)
-							 << LL_ENDL;
+		LL_INFOS(LOG_CORE) << "TRACE, ToReplyQueue, Handle:  "
+						   << static_cast<HttpHandle>(this)
+						   << LL_ENDL;
 	}
 	
 	if (mReplyQueue)
diff --git a/indra/llcorehttp/_httpoprequest.cpp b/indra/llcorehttp/_httpoprequest.cpp
index 43dd069bc67..eb664fdced5 100755
--- a/indra/llcorehttp/_httpoprequest.cpp
+++ b/indra/llcorehttp/_httpoprequest.cpp
@@ -4,7 +4,7 @@
  *
  * $LicenseInfo:firstyear=2012&license=viewerlgpl$
  * Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -94,6 +94,8 @@ void os_strlower(char * str);
 void check_curl_easy_code(CURLcode code);
 void check_curl_easy_code(CURLcode code, int curl_setopt_option);
 
+static const char * const LOG_CORE("CoreHttp");
+
 } // end anonymous namespace
 
 
@@ -416,8 +418,8 @@ HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
 	if (! mCurlHandle)
 	{
 		// We're in trouble.  We'll continue but it won't go well.
-		LL_WARNS("CoreHttp") << "Failed to allocate libcurl easy handle.  Continuing."
-							 << LL_ENDL;
+		LL_WARNS(LOG_CORE) << "Failed to allocate libcurl easy handle.  Continuing."
+						   << LL_ENDL;
 		return HttpStatus(HttpStatus::LLCORE, HE_BAD_ALLOC);
 	}
 	code = curl_easy_setopt(mCurlHandle, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4);
@@ -538,9 +540,9 @@ HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
 		break;
 		
 	default:
-		LL_ERRS("CoreHttp") << "Invalid HTTP method in request:  "
-							<< int(mReqMethod)  << ".  Can't recover."
-							<< LL_ENDL;
+		LL_ERRS(LOG_CORE) << "Invalid HTTP method in request:  "
+						  << int(mReqMethod)  << ".  Can't recover."
+						  << LL_ENDL;
 		break;
 	}
 
@@ -652,8 +654,8 @@ size_t HttpOpRequest::readCallback(void * data, size_t size, size_t nmemb, void
 		{
 			// Warn but continue if the read position moves beyond end-of-body
 			// for some reason.
-			LL_WARNS("CoreHttp") << "Request body position beyond body size.  Truncating request body."
-								 << LL_ENDL;
+			LL_WARNS(LOG_CORE) << "Request body position beyond body size.  Truncating request body."
+							   << LL_ENDL;
 		}
 		return 0;
 	}
@@ -790,10 +792,10 @@ size_t HttpOpRequest::headerCallback(void * data, size_t size, size_t nmemb, voi
 		else
 		{
 			// Ignore the unparsable.
-			LL_INFOS_ONCE("CoreHttp") << "Problem parsing odd Content-Range header:  '"
-									  << std::string(hdr_data, wanted_hdr_size)
-									  << "'.  Ignoring."
-									  << LL_ENDL;
+			LL_INFOS_ONCE(LOG_CORE) << "Problem parsing odd Content-Range header:  '"
+									<< std::string(hdr_data, wanted_hdr_size)
+									<< "'.  Ignoring."
+									<< LL_ENDL;
 		}
 	}
 
@@ -895,11 +897,11 @@ int HttpOpRequest::debugCallback(CURL * handle, curl_infotype info, char * buffe
 
 	if (logit)
 	{
-		LL_INFOS("CoreHttp") << "TRACE, LibcurlDebug, Handle:  "
-							 << static_cast<HttpHandle>(op)
-							 << ", Type:  " << tag
-							 << ", Data:  " << safe_line
-							 << LL_ENDL;
+		LL_INFOS(LOG_CORE) << "TRACE, LibcurlDebug, Handle:  "
+						   << static_cast<HttpHandle>(op)
+						   << ", Type:  " << tag
+						   << ", Data:  " << safe_line
+						   << LL_ENDL;
 	}
 		
 	return 0;
@@ -1094,9 +1096,9 @@ void check_curl_easy_code(CURLcode code, int curl_setopt_option)
 		//
 		// linux appears to throw a curl error once per session for a bad initialization
 		// at a pretty random time (when enabling cookies).
-		LL_WARNS("CoreHttp") << "libcurl error detected:  " << curl_easy_strerror(code)
-							 << ", curl_easy_setopt option:  " << curl_setopt_option
-							 << LL_ENDL;
+		LL_WARNS(LOG_CORE) << "libcurl error detected:  " << curl_easy_strerror(code)
+						   << ", curl_easy_setopt option:  " << curl_setopt_option
+						   << LL_ENDL;
 	}
 }
 
@@ -1109,8 +1111,8 @@ void check_curl_easy_code(CURLcode code)
 		//
 		// linux appears to throw a curl error once per session for a bad initialization
 		// at a pretty random time (when enabling cookies).
-		LL_WARNS("CoreHttp") << "libcurl error detected:  " << curl_easy_strerror(code)
-							 << LL_ENDL;
+		LL_WARNS(LOG_CORE) << "libcurl error detected:  " << curl_easy_strerror(code)
+						   << LL_ENDL;
 	}
 }
 
diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index bb7959b578a..09b9206f63d 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -35,6 +35,13 @@
 
 #include "lltimer.h"
 
+namespace
+{
+
+static const char * const LOG_CORE("CoreHttp");
+
+} // end anonymous namespace
+
 
 namespace LLCore
 {
@@ -51,7 +58,8 @@ struct HttpPolicy::ClassState
 	ClassState()
 		: mThrottleEnd(0),
 		  mThrottleLeft(0L),
-		  mRequestCount(0L)
+		  mRequestCount(0L),
+		  mStallStaging(false)
 		{}
 	
 	HttpReadyQueue		mReadyQueue;
@@ -61,6 +69,7 @@ struct HttpPolicy::ClassState
 	HttpTime			mThrottleEnd;
 	long				mThrottleLeft;
 	long				mRequestCount;
+	bool				mStallStaging;
 };
 
 
@@ -171,19 +180,19 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
 	{
 		++op->mPolicy503Retries;
 	}
-	LL_DEBUGS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
-						  << " retry " << op->mPolicyRetries
-						  << " scheduled in " << (delta / HttpTime(1000))
-						  << " mS (" << (external_delta ? "external" : "internal")
-						  << ").  Status:  " << op->mStatus.toTerseString()
-						  << LL_ENDL;
+	LL_DEBUGS(LOG_CORE) << "HTTP request " << static_cast<HttpHandle>(op)
+						<< " retry " << op->mPolicyRetries
+						<< " scheduled in " << (delta / HttpTime(1000))
+						<< " mS (" << (external_delta ? "external" : "internal")
+						<< ").  Status:  " << op->mStatus.toTerseString()
+						<< LL_ENDL;
 	if (op->mTracing > HTTP_TRACE_OFF)
 	{
-		LL_INFOS("CoreHttp") << "TRACE, ToRetryQueue, Handle:  "
-							 << static_cast<HttpHandle>(op)
-							 << ", Delta:  " << (delta / HttpTime(1000))
-							 << ", Retries:  " << op->mPolicyRetries
-							 << LL_ENDL;
+		LL_INFOS(LOG_CORE) << "TRACE, ToRetryQueue, Handle:  "
+						   << static_cast<HttpHandle>(op)
+						   << ", Delta:  " << (delta / HttpTime(1000))
+						   << ", Retries:  " << op->mPolicyRetries
+						   << LL_ENDL;
 	}
 	mClasses[policy_class]->mRetryQueue.push(op);
 }
@@ -219,6 +228,15 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
 		HttpRetryQueue & retryq(state.mRetryQueue);
 		HttpReadyQueue & readyq(state.mReadyQueue);
 
+		if (state.mStallStaging)
+		{
+			// Stalling but don't sleep.  Need to complete operations
+			// and get back to servicing queues.  Do this test before
+			// the retryq/readyq test or you'll get stalls until you
+			// click a setting or an asset request comes in.
+			result = HttpService::NORMAL;
+			continue;
+		}
 		if (retryq.empty() && readyq.empty())
 		{
 			continue;
@@ -262,9 +280,9 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
 					if (now >= state.mThrottleEnd)
 					{
 						// Throttle expired, move to next window
-						LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
-											  << " requests to go and " << state.mRequestCount
-											  << " requests issued." << LL_ENDL;
+						LL_DEBUGS(LOG_CORE) << "Throttle expired with " << state.mThrottleLeft
+											<< " requests to go and " << state.mRequestCount
+											<< " requests issued." << LL_ENDL;
 						state.mThrottleLeft = state.mOptions.mThrottleRate;
 						state.mThrottleEnd = now + HttpTime(1000000);
 					}
@@ -291,9 +309,9 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
 					if (now >= state.mThrottleEnd)
 					{
 						// Throttle expired, move to next window
-						LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
-											  << " requests to go and " << state.mRequestCount
-											  << " requests issued." << LL_ENDL;
+						LL_DEBUGS(LOG_CORE) << "Throttle expired with " << state.mThrottleLeft
+											<< " requests to go and " << state.mRequestCount
+											<< " requests issued." << LL_ENDL;
 						state.mThrottleLeft = state.mOptions.mThrottleRate;
 						state.mThrottleEnd = now + HttpTime(1000000);
 					}
@@ -408,17 +426,17 @@ bool HttpPolicy::stageAfterCompletion(HttpOpRequest * op)
 	// This op is done, finalize it delivering it to the reply queue...
 	if (! op->mStatus)
 	{
-		LL_WARNS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
-							 << " failed after " << op->mPolicyRetries
-							 << " retries.  Reason:  " << op->mStatus.toString()
-							 << " (" << op->mStatus.toTerseString() << ")"
-							 << LL_ENDL;
+		LL_WARNS(LOG_CORE) << "HTTP request " << static_cast<HttpHandle>(op)
+						   << " failed after " << op->mPolicyRetries
+						   << " retries.  Reason:  " << op->mStatus.toString()
+						   << " (" << op->mStatus.toTerseString() << ")"
+						   << LL_ENDL;
 	}
 	else if (op->mPolicyRetries)
 	{
-		LL_DEBUGS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
-							  << " succeeded on retry " << op->mPolicyRetries << "."
-							  << LL_ENDL;
+		LL_DEBUGS(LOG_CORE) << "HTTP request " << static_cast<HttpHandle>(op)
+							<< " succeeded on retry " << op->mPolicyRetries << "."
+							<< LL_ENDL;
 	}
 
 	op->stageFromActive(mService);
@@ -446,4 +464,17 @@ int HttpPolicy::getReadyCount(HttpRequest::policy_t policy_class) const
 }
 
 
+bool HttpPolicy::stallPolicy(HttpRequest::policy_t policy_class, bool stall)
+{
+	bool ret(false);
+	
+	if (policy_class < mClasses.size())
+	{
+		ret = mClasses[policy_class]->mStallStaging;
+		mClasses[policy_class]->mStallStaging = stall;
+	}
+	return ret;
+}
+
+
 }  // end namespace LLCore
diff --git a/indra/llcorehttp/_httppolicy.h b/indra/llcorehttp/_httppolicy.h
index bf1aa742673..11cd89bbd1d 100755
--- a/indra/llcorehttp/_httppolicy.h
+++ b/indra/llcorehttp/_httppolicy.h
@@ -158,6 +158,14 @@ class HttpPolicy
 	/// Threading:  called by worker thread
 	int getReadyCount(HttpRequest::policy_t policy_class) const;
 	
+	/// Stall (or unstall) a policy class preventing requests from
+	/// transitioning to an active state.  Used to allow an HTTP
+	/// request policy to empty prior to changing settings or state
+	/// that isn't tolerant of changes when work is outstanding.
+	///
+	/// Threading:  called by worker thread
+	bool stallPolicy(HttpRequest::policy_t policy_class, bool stall);
+	
 protected:
 	struct ClassState;
 	typedef std::vector<ClassState *>	class_list_t;
diff --git a/indra/llcorehttp/_httpservice.cpp b/indra/llcorehttp/_httpservice.cpp
index c94249dc2d5..c673e1be1d3 100755
--- a/indra/llcorehttp/_httpservice.cpp
+++ b/indra/llcorehttp/_httpservice.cpp
@@ -4,7 +4,7 @@
  *
  * $LicenseInfo:firstyear=2012&license=viewerlgpl$
  * Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -40,6 +40,14 @@
 #include "llthread.h"
 
 
+namespace
+{
+
+static const char * const LOG_CORE("CoreHttp");
+
+} // end anonymous namespace
+
+
 namespace LLCore
 {
 
@@ -87,8 +95,8 @@ HttpService::~HttpService()
 				// Failed to join, expect problems ahead so do a hard termination.
 				mThread->cancel();
 
-				LL_WARNS("CoreHttp") << "Destroying HttpService with running thread.  Expect problems."
-									 << LL_ENDL;
+				LL_WARNS(LOG_CORE) << "Destroying HttpService with running thread.  Expect problems."
+								   << LL_ENDL;
 			}
 		}
 	}
@@ -328,9 +336,9 @@ HttpService::ELoopSpeed HttpService::processRequestQueue(ELoopSpeed loop)
 
 			if (op->mTracing > HTTP_TRACE_OFF)
 			{
-				LL_INFOS("CoreHttp") << "TRACE, FromRequestQueue, Handle:  "
-									 << static_cast<HttpHandle>(op)
-									 << LL_ENDL;
+				LL_INFOS(LOG_CORE) << "TRACE, FromRequestQueue, Handle:  "
+								   << static_cast<HttpHandle>(op)
+								   << LL_ENDL;
 			}
 
 			// Stage
@@ -437,9 +445,13 @@ HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequ
 		HttpPolicyClass & opts(mPolicy->getClassOptions(pclass));
 
 		status = opts.set(opt, value);
-		if (status && ret_value)
+		if (status)
 		{
-			status = opts.get(opt, ret_value);
+			mTransport->policyUpdated(pclass);
+			if (ret_value)
+			{
+				status = opts.get(opt, ret_value);
+			}
 		}
 	}
 
@@ -463,7 +475,7 @@ HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequ
 		return status;
 	}
 
-	// Only string values are global at this time
+	// String values are always global (at this time).
 	if (pclass == HttpRequest::GLOBAL_POLICY_ID)
 	{
 		HttpPolicyGlobal & opts(mPolicy->getGlobalOptions());
diff --git a/indra/newview/app_settings/settings.xml b/indra/newview/app_settings/settings.xml
index 50f56cf6ff5..0607579a086 100755
--- a/indra/newview/app_settings/settings.xml
+++ b/indra/newview/app_settings/settings.xml
@@ -4459,7 +4459,7 @@
     <key>HttpPipelining</key>
     <map>
       <key>Comment</key>
-      <string>If true, viewer will pipeline HTTP requests to servers.  Static.</string>
+      <string>If true, viewer will pipeline HTTP requests to servers.</string>
       <key>Persist</key>
       <integer>1</integer>
       <key>Type</key>
@@ -4470,7 +4470,7 @@
     <key>HttpRangeRequestsDisable</key>
     <map>
       <key>Comment</key>
-      <string>If true, viewer will not issued range GET requests for meshes and textures.</string>
+      <string>If true, viewer will not issue range GET requests for meshes and textures.  May resolve problems with certain ISPs and networking gear.</string>
       <key>Persist</key>
       <integer>1</integer>
       <key>Type</key>
diff --git a/indra/newview/llappcorehttp.cpp b/indra/newview/llappcorehttp.cpp
index d097f18d61c..464e60948aa 100755
--- a/indra/newview/llappcorehttp.cpp
+++ b/indra/newview/llappcorehttp.cpp
@@ -60,7 +60,7 @@ static const struct
 		"other"
 	},
 	{ // AP_TEXTURE
-		4,		1,		12,		0,		true,	
+		8,		1,		12,		0,		true,	
 		"TextureFetchConcurrency",
 		"texture fetch"
 	},
@@ -70,7 +70,7 @@ static const struct
 		"mesh fetch"
 	},
 	{ // AP_MESH2
-		4,		1,		32,		100,	true,	
+		8,		1,		32,		100,	true,	
 		"Mesh2MaxConcurrentRequests",
 		"mesh2 fetch"
 	},
@@ -126,14 +126,6 @@ void LLAppCoreHttp::init()
 						<< LL_ENDL;
 	}
 
-	// Global pipelining preference from settings
-	static const std::string http_pipelining("HttpPipelining");
-	if (gSavedSettings.controlExists(http_pipelining))
-	{
-		// Default to true if absent.
-		mPipelined = gSavedSettings.getBOOL(http_pipelining);
-	}
-
 	// Point to our certs or SSH/https: will fail on connect
 	status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_CA_FILE,
 														LLCore::HttpRequest::GLOBAL_POLICY_ID,
@@ -210,12 +202,27 @@ void LLAppCoreHttp::init()
 						<< LL_ENDL;
 	}
 
-	// *NOTE:  Pipelining isn't dynamic yet.  When it is, add a global
-	// signal for the setting here.
-	
+	// Signal for global pipelining preference from settings
+	static const std::string http_pipelining("HttpPipelining");
+	if (gSavedSettings.controlExists(http_pipelining))
+	{
+		LLPointer<LLControlVariable> cntrl_ptr = gSavedSettings.getControl(http_pipelining);
+		if (cntrl_ptr.isNull())
+		{
+			LL_WARNS("Init") << "Unable to set signal on global setting '" << http_pipelining
+							 << "'" << LL_ENDL;
+		}
+		else
+		{
+			mPipelinedSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed));
+		}
+	}
+
 	// Register signals for settings and state changes
 	for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
 	{
+		const EAppPolicy app_policy(static_cast<EAppPolicy>(i));
+
 		if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey))
 		{
 			LLPointer<LLControlVariable> cntrl_ptr = gSavedSettings.getControl(init_data[i].mKey);
@@ -226,7 +233,7 @@ void LLAppCoreHttp::init()
 			}
 			else
 			{
-				mHttpClasses[i].mSettingsSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed));
+				mHttpClasses[app_policy].mSettingsSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed));
 			}
 		}
 	}
@@ -282,6 +289,7 @@ void LLAppCoreHttp::cleanup()
 	{
 		mHttpClasses[i].mSettingsSignal.disconnect();
 	}
+	mPipelinedSignal.disconnect();
 	
 	delete mRequest;
 	mRequest = NULL;
@@ -299,6 +307,20 @@ void LLAppCoreHttp::cleanup()
 void LLAppCoreHttp::refreshSettings(bool initial)
 {
 	LLCore::HttpStatus status;
+
+	// Global pipelining setting
+	bool pipeline_changed(false);
+	static const std::string http_pipelining("HttpPipelining");
+	if (gSavedSettings.controlExists(http_pipelining))
+	{
+		// Default to true (in ctor) if absent.
+		bool pipelined(gSavedSettings.getBOOL(http_pipelining));
+		if (pipelined != mPipelined)
+		{
+			mPipelined = pipelined;
+			pipeline_changed = true;
+		}
+	}
 	
 	for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
 	{
@@ -323,33 +345,42 @@ void LLAppCoreHttp::refreshSettings(bool initial)
 				}
 			}
 
-			mHttpClasses[app_policy].mPipelined = false;
-			if (mPipelined && init_data[i].mPipelined)
+		}
+
+		// Init- or run-time settings.  Must use the queued request API.
+
+		// Pipelining changes
+		if (initial || pipeline_changed)
+		{
+			const bool to_pipeline(mPipelined && init_data[i].mPipelined);
+			if (to_pipeline != mHttpClasses[app_policy].mPipelined)
 			{
-				// Pipelining election is currently static (init-time).
-				// Making it dynamic isn't too hard in the SL code but verifying
-				// that libcurl handles the on-to-off transition while holding
-				// outstanding requests is something that should be tested.
+				// Pipeline election changing, set dynamic option via request
+
+				LLCore::HttpHandle handle;
+				const long new_depth(to_pipeline ? PIPELINING_DEPTH : 0);
 				
-				status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH,
-																	mHttpClasses[app_policy].mPolicy,
-																	PIPELINING_DEPTH,
-																	NULL);
-				if (! status)
+				handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH,
+												   mHttpClasses[app_policy].mPolicy,
+												   new_depth,
+												   NULL);
+				if (LLCORE_HTTP_HANDLE_INVALID == handle)
 				{
+					status = mRequest->getStatus();
 					LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
-									 << " to pipelined mode.  Reason:  " << status.toString()
+									 << " pipelining.  Reason:  " << status.toString()
 									 << LL_ENDL;
 				}
 				else
 				{
-					mHttpClasses[app_policy].mPipelined = true;
+					LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage
+									  << " pipelining.  New value:  " << new_depth
+									  << LL_ENDL;
+					mHttpClasses[app_policy].mPipelined = to_pipeline;
 				}
 			}
 		}
-
-		// Init- or run-time settings
-
+		
 		// Get target connection concurrency value
 		U32 setting(init_data[i].mDefault);
 		if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey))
@@ -362,63 +393,60 @@ void LLAppCoreHttp::refreshSettings(bool initial)
 			}
 		}
 
-		if (! initial && setting == mHttpClasses[app_policy].mConnLimit)
+		if (initial || setting != mHttpClasses[app_policy].mConnLimit || pipeline_changed)
 		{
-			// Unchanged, try next setting
-			continue;
-		}
-		
-		// Set it and report.  Strategies depend on pipelining:
-		//
-		// No Pipelining.  Llcorehttp manages connections itself based
-		// on the PO_CONNECTION_LIMIT setting.  Set both limits to the
-		// same value for logical consistency.  In the future, may
-		// hand over connection management to libcurl after the
-		// connection cache has been better vetted.
-		//
-		// Pipelining.  Libcurl is allowed to manage connections to a
-		// great degree.  Steady state will connection limit based on
-		// the per-host setting.  Transitions (region crossings, new
-		// avatars, etc.) can request additional outbound connections
-		// to other servers via 2X total connection limit.
-		//
-		LLCore::HttpHandle handle;
-		handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT,
-										   mHttpClasses[app_policy].mPolicy,
-										   (mHttpClasses[app_policy].mPipelined ? 2 * setting : setting),
-										   NULL);
-		if (LLCORE_HTTP_HANDLE_INVALID == handle)
-		{
-			status = mRequest->getStatus();
-			LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
-							 << " concurrency.  Reason:  " << status.toString()
-							 << LL_ENDL;
-		}
-		else
-		{
-			handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT,
+			// Set it and report.  Strategies depend on pipelining:
+			//
+			// No Pipelining.  Llcorehttp manages connections itself based
+			// on the PO_CONNECTION_LIMIT setting.  Set both limits to the
+			// same value for logical consistency.  In the future, may
+			// hand over connection management to libcurl after the
+			// connection cache has been better vetted.
+			//
+			// Pipelining.  Libcurl is allowed to manage connections to a
+			// great degree.  Steady state will connection limit based on
+			// the per-host setting.  Transitions (region crossings, new
+			// avatars, etc.) can request additional outbound connections
+			// to other servers via 2X total connection limit.
+			//
+			LLCore::HttpHandle handle;
+			handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT,
 											   mHttpClasses[app_policy].mPolicy,
-											   setting,
+											   (mHttpClasses[app_policy].mPipelined ? 2 * setting : setting),
 											   NULL);
 			if (LLCORE_HTTP_HANDLE_INVALID == handle)
 			{
 				status = mRequest->getStatus();
 				LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
-								 << " per-host concurrency.  Reason:  " << status.toString()
+								 << " concurrency.  Reason:  " << status.toString()
 								 << LL_ENDL;
 			}
 			else
 			{
-				LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage
-								  << " concurrency.  New value:  " << setting
-								  << LL_ENDL;
-				mHttpClasses[app_policy].mConnLimit = setting;
-				if (initial && setting != init_data[i].mDefault)
+				handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT,
+												   mHttpClasses[app_policy].mPolicy,
+												   setting,
+												   NULL);
+				if (LLCORE_HTTP_HANDLE_INVALID == handle)
 				{
-					LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage
-									 << " concurrency.  New value:  " << setting
+					status = mRequest->getStatus();
+					LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
+									 << " per-host concurrency.  Reason:  " << status.toString()
 									 << LL_ENDL;
 				}
+				else
+				{
+					LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage
+									  << " concurrency.  New value:  " << setting
+									  << LL_ENDL;
+					mHttpClasses[app_policy].mConnLimit = setting;
+					if (initial && setting != init_data[i].mDefault)
+					{
+						LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage
+										 << " concurrency.  New value:  " << setting
+										 << LL_ENDL;
+					}
+				}
 			}
 		}
 	}
diff --git a/indra/newview/llappcorehttp.h b/indra/newview/llappcorehttp.h
index 63c8a11180c..9ad4eb4b30a 100755
--- a/indra/newview/llappcorehttp.h
+++ b/indra/newview/llappcorehttp.h
@@ -218,6 +218,7 @@ class LLAppCoreHttp : public LLCore::HttpHandler
 	bool						mStopped;
 	HttpClass					mHttpClasses[AP_COUNT];
 	bool						mPipelined;				// Global setting
+	boost::signals2::connection mPipelinedSignal;		// Signal for 'HttpPipelining' setting
 };
 
 
diff --git a/indra/newview/lltexturefetch.cpp b/indra/newview/lltexturefetch.cpp
index 4008a6948d4..097a7b374f0 100755
--- a/indra/newview/lltexturefetch.cpp
+++ b/indra/newview/lltexturefetch.cpp
@@ -483,12 +483,12 @@ class LLTextureFetchWorker : public LLWorkerClass, public LLCore::HttpHandler
 	bool acquireHttpSemaphore()
 		{
 			llassert(! mHttpHasResource);
-			if (mFetcher->mHttpSemaphore <= 0)
+			if (mFetcher->mHttpSemaphore >= mFetcher->mHttpHighWater)
 			{
 				return false;
 			}
 			mHttpHasResource = true;
-			mFetcher->mHttpSemaphore--;
+			mFetcher->mHttpSemaphore++;
 			return true;
 		}
 
@@ -498,7 +498,8 @@ class LLTextureFetchWorker : public LLWorkerClass, public LLCore::HttpHandler
 		{
 			llassert(mHttpHasResource);
 			mHttpHasResource = false;
-			mFetcher->mHttpSemaphore++;
+			mFetcher->mHttpSemaphore--;
+			llassert_always(mFetcher->mHttpSemaphore >= 0);
 		}
 	
 private:
@@ -2523,6 +2524,8 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image
 	mMaxBandwidth = gSavedSettings.getF32("ThrottleBandwidthKBPS");
 	mTextureInfo.setUpLogging(gSavedSettings.getBOOL("LogTextureDownloadsToViewerLog"), gSavedSettings.getBOOL("LogTextureDownloadsToSimulator"), U32Bytes(gSavedSettings.getU32("TextureLoggingThreshold")));
 
+	LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());
+	mHttpPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_TEXTURE);
 	mHttpRequest = new LLCore::HttpRequest;
 	mHttpOptions = new LLCore::HttpOptions;
 	mHttpOptionsWithHeaders = new LLCore::HttpOptions;
@@ -2531,21 +2534,9 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image
 	mHttpHeaders->append("Accept", "image/x-j2c");
 	mHttpMetricsHeaders = new LLCore::HttpHeaders;
 	mHttpMetricsHeaders->append("Content-Type", "application/llsd+xml");
-	LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());
-	mHttpPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_TEXTURE);
-	if (app_core_http.isPipelined(LLAppCoreHttp::AP_TEXTURE))
-	{
-		// Init-time election that will have to change for
-		// support of dynamic changes to the pipelining enable flag.
-		mHttpHighWater = HTTP_PIPE_REQUESTS_HIGH_WATER;
-		mHttpLowWater = HTTP_PIPE_REQUESTS_LOW_WATER;
-	}
-	else
-	{
-		mHttpHighWater = HTTP_NONPIPE_REQUESTS_HIGH_WATER;
-		mHttpLowWater = HTTP_NONPIPE_REQUESTS_LOW_WATER;
-	}
-	mHttpSemaphore = mHttpHighWater;
+	mHttpHighWater = HTTP_NONPIPE_REQUESTS_HIGH_WATER;
+	mHttpLowWater = HTTP_NONPIPE_REQUESTS_LOW_WATER;
+	mHttpSemaphore = 0;
 
 	// Conditionally construct debugger object after 'this' is
 	// fully initialized.
@@ -3032,6 +3023,20 @@ bool LLTextureFetch::runCondition()
 // Threads:  Ttf
 void LLTextureFetch::commonUpdate()
 {
+	// Update low/high water levels based on pipelining.  We pick up
+	// the setting eventually, so the semaphore/request level can
+	// fall outside the [0..HIGH_WATER] range.  Expect that.
+	if (LLAppViewer::instance()->getAppCoreHttp().isPipelined(LLAppCoreHttp::AP_TEXTURE))
+	{
+		mHttpHighWater = HTTP_PIPE_REQUESTS_HIGH_WATER;
+		mHttpLowWater = HTTP_PIPE_REQUESTS_LOW_WATER;
+	}
+	else
+	{
+		mHttpHighWater = HTTP_NONPIPE_REQUESTS_HIGH_WATER;
+		mHttpLowWater = HTTP_NONPIPE_REQUESTS_LOW_WATER;
+	}
+
 	// Release waiters
 	releaseHttpWaiters();
 	
@@ -3693,8 +3698,16 @@ void LLTextureFetch::releaseHttpWaiters()
 {
 	// Use mHttpSemaphore rather than mHTTPTextureQueue.size()
 	// to avoid a lock.  
-	if (mHttpSemaphore < (mHttpHighWater - mHttpLowWater))
+	if (mHttpSemaphore >= mHttpLowWater)
 		return;
+	S32 needed(mHttpHighWater - mHttpSemaphore);
+	if (needed <= 0)
+	{
+		// Would only happen if High/LowWater were changed behind
+		// our back.  In that case, defer fill until usage falls within
+		// limits.
+		return;
+	}
 
 	// Quickly make a copy of all the LLUIDs.  Get off the
 	// mutex as early as possible.
@@ -3743,10 +3756,10 @@ void LLTextureFetch::releaseHttpWaiters()
 	tids.clear();
 
 	// Sort into priority order, if necessary and only as much as needed
-	if (tids2.size() > mHttpSemaphore)
+	if (tids2.size() > needed)
 	{
 		LLTextureFetchWorker::Compare compare;
-		std::partial_sort(tids2.begin(), tids2.begin() + mHttpSemaphore, tids2.end(), compare);
+		std::partial_sort(tids2.begin(), tids2.begin() + needed, tids2.end(), compare);
 	}
 
 	// Release workers up to the high water mark.  Since we aren't
diff --git a/indra/newview/lltexturefetch.h b/indra/newview/lltexturefetch.h
index d13736997fa..89d18e2c67a 100755
--- a/indra/newview/lltexturefetch.h
+++ b/indra/newview/lltexturefetch.h
@@ -356,8 +356,8 @@ class LLTextureFetch : public LLWorkerThread
 	LLCore::HttpHeaders *				mHttpHeaders;					// Ttf
 	LLCore::HttpHeaders *				mHttpMetricsHeaders;			// Ttf
 	LLCore::HttpRequest::policy_t		mHttpPolicyClass;				// T*
-	S32									mHttpHighWater;					// T* (ro)
-	S32									mHttpLowWater;					// T* (ro)
+	S32									mHttpHighWater;					// Ttf
+	S32									mHttpLowWater;					// Ttf
 	
 	// We use a resource semaphore to keep HTTP requests in
 	// WAIT_HTTP_RESOURCE2 if there aren't sufficient slots in the
@@ -366,7 +366,11 @@ class LLTextureFetch : public LLWorkerThread
 	// where it's more expensive to get at them.  Requests in either
 	// SEND_HTTP_REQ or WAIT_HTTP_REQ charge against the semaphore
 	// and tracking state transitions is critical to liveness.
-	LLAtomicS32							mHttpSemaphore;					// Ttf + Tmain
+	//
+	// Originally implemented as a traditional semaphore (heading towards
+	// zero), it now is an outstanding request count that is allowed to
+	// exceed the high water level (but not go below zero).
+	LLAtomicS32							mHttpSemaphore;					// Ttf
 	
 	typedef std::set<LLUUID> wait_http_res_queue_t;
 	wait_http_res_queue_t				mHttpWaitResource;				// Mfnq
-- 
GitLab