From e9a9dddd8628435fc7af886c76cd11016ff40d66 Mon Sep 17 00:00:00 2001
From: Rye Mutt <rye@alchemyviewer.org>
Date: Tue, 31 Jan 2023 19:31:00 -0500
Subject: [PATCH] Replace boost random with std random that doesn't break
 particles this time

---
 indra/llcommon/llrand.cpp | 90 +++++++++++++++++++++++++++------------
 1 file changed, 63 insertions(+), 27 deletions(-)

diff --git a/indra/llcommon/llrand.cpp b/indra/llcommon/llrand.cpp
index 5c451b63d0a..b63eed50b02 100644
--- a/indra/llcommon/llrand.cpp
+++ b/indra/llcommon/llrand.cpp
@@ -29,68 +29,104 @@
 #include "llrand.h"
 #include "lluuid.h"
 
-#include <boost/random/lagged_fibonacci.hpp>
+#include <random>
 
-static boost::lagged_fibonacci2281 gRandomGenerator(LLUUID::getRandomSeed());
-inline F64 ll_internal_random_double()
-{
-	F64 rv = gRandomGenerator();
-	if(!((rv >= 0.0) && (rv < 1.0))) return fmod(rv, 1.0);
-	return rv;
-}
+/**
+ * Through analysis, we have decided that values which land close
+ * enough to the top of a half-open range should map back to 0.0.
+ * We came to this conclusion by noting that when the range
+ *
+ * [0.0, 1.0)
+ *
+ * is scaled to the integer set
+ *
+ * [0, 4)
+ *
+ * a floating-point value close enough to 1.0, once multiplied by 4,
+ * can round so that truncation yields 4, which falls outside the
+ * target set. The intended mapping is:
+ *
+ * [0,1-eps] => 0
+ * [1,2-eps] => 1
+ * [2,3-eps] => 2
+ * [3,4-eps] => 3
+ *
+ * Simply clamping the stray value into the top bucket would give that
+ * bucket extra weight, so the clamp used throughout this file maps
+ * out-of-range values back to 0 to restore a uniform distribution.
+ *
+ * Similarly, when asking for a float distribution over [0.0, g), we
+ * have observed (notably for g < 0.5) that rand * g can come out as
+ * exactly g, which is not the desired result. As above, we map such
+ * values back to 0 to keep the range half-open.
+ */
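+
+// For example: ll_rand(4) draws from {0,1,2,3,4} and folds a raw 4 back
+// to 0, so callers always see [0, 4); likewise ll_frand(2.f) maps a
+// product that rounds to exactly 2.0f back to 0.0f.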
 
-inline F32 ll_internal_random_float()
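+// One engine per thread, lazily seeded from std::random_device on first
+// use; thread_local storage means no locking is needed when multiple
+// threads draw random numbers.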
+static thread_local std::unique_ptr<std::ranlux48> __generator;
+inline std::ranlux48* _generator()
 {
-	// The clamping rules are described above.
-	F32 rv = (F32)gRandomGenerator();
-	if(!((rv >= 0.0f) && (rv < 1.0f))) return fmod(rv, 1.f);
-	return rv;
+	if (!__generator.get())
+	{
+		std::random_device seeder;
+		__generator = std::make_unique<std::ranlux48>(seeder());
+	}
+	return __generator.get();
 }
 
 S32 ll_rand()
 {
-	return ll_rand(RAND_MAX);
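+	// RAND_MAX is implementation-defined (it may be as small as 32767),
+	// so use the full non-negative S32 range instead.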
+	return ll_rand(S32_MAX);
 }
 
 S32 ll_rand(S32 val)
 {
-	S32 rv = (S32)(ll_internal_random_double() * val);
-	if(rv == val) return 0;
+	if (val == 0) return 0;
+
+	// The clamping rules are described above.
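+	// The distributions below include val itself; a draw of exactly val
+	// is folded back to 0 so the result never reaches val.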
+	S32 rv;
+	if (val > 0)
+	{
+		rv = std::uniform_int_distribution<S32>(0, val)(*_generator());
+	}
+	else
+	{
+		rv = std::uniform_int_distribution<S32>(val, 0)(*_generator());
+	}
+	if (rv == val) return 0;
 	return rv;
 }
 
 F32 ll_frand()
 {
-	return ll_internal_random_float();
+	// see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63176
+	// and https://llvm.org/bugs/show_bug.cgi?id=18767
+	// and https://github.com/microsoft/STL/issues/1074
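+	// (all of which concern canonical float generation occasionally
+	// returning the excluded upper bound of 1.0)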
+	F32 rv = (F32)std::generate_canonical<F64, 10>(*_generator());
+	// Narrowing a double that lies just below 1.0 can round up to
+	// exactly 1.0f; per the clamping rules above, fold that back to 0.
+	return rv < 1.f ? rv : 0.f;
 }
 
 F32 ll_frand(F32 val)
 {
-	// The clamping rules are described above.
-	F32 rv = ll_internal_random_float() * val;
-	if(val > 0)
+	F32 rv = ll_frand() * val;
+	if (val > 0.0f)
 	{
-		if(rv >= val) return 0.0f;
+		if (rv >= val) return 0.0f;
 	}
 	else
 	{
-		if(rv <= val) return 0.0f;
+		if (rv <= val) return 0.0f;
 	}
 	return rv;
 }
 
 F64 ll_drand()
 {
-	return ll_internal_random_double();
+	return std::generate_canonical<F64, 10>(*_generator());
 }
 
 F64 ll_drand(F64 val)
 {
-	// The clamping rules are described above.
-	F64 rv = ll_internal_random_double() * val;
-	if(val > 0)
+	F64 rv = ll_drand() * val;
+	if (val > 0.0)
 	{
-		if(rv >= val) return 0.0;
+		if (rv >= val) return 0.0;
 	}
 	else
 	{
-		if(rv <= val) return 0.0;
+		if (rv <= val) return 0.0;
 	}
 	return rv;
 }
-- 
GitLab