| author | Will Hawkins <[email protected]> | 2023-07-10 13:45:50 -0400 |
|---|---|---|
| committer | Will Hawkins <[email protected]> | 2023-07-10 13:45:50 -0400 |
| commit | 78d574a74665c8bc062c26755c80a8b524bce347 (patch) | |
| tree | 7ad65f0052defaea63acb2f3445be00ef97e24d6 /ms | |
| parent | fe17152a507bbf94a11cca7f49a51cbae9c0d67b (diff) | |
[Feature] Major update: Track measurements that may be delayed
Among other major feature additions, this version of the client tracks
any measurements that may be long delayed and considers their presence
or absence as part of a stability measurement.
This version of the client also tracks the spec more closely. In
particular, it performs a single-sided trimmed mean rather than a
double-sided trimmed mean.
Signed-off-by: Will Hawkins <[email protected]>
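
The commit message does not show how delayed measurements are tracked, so purely as an illustration, here is a minimal sketch of one way a client might flag long-delayed probes and fold them into a stability decision. The `DelayedProbeTracker` type and the two-second cutoff are hypothetical and are not taken from this change:

```go
package main

import (
	"fmt"
	"time"
)

// DelayedProbeTracker is a hypothetical helper: it counts how many probes
// completed after a cutoff so a caller can factor that into stability.
type DelayedProbeTracker struct {
	cutoff  time.Duration
	total   int
	delayed int
}

// Record notes one completed probe and whether it exceeded the cutoff.
func (t *DelayedProbeTracker) Record(rtt time.Duration) {
	t.total++
	if rtt > t.cutoff {
		t.delayed++
	}
}

// Stable is illustrative only: treat the series as unstable if any probe
// was long delayed.
func (t *DelayedProbeTracker) Stable() bool {
	return t.total > 0 && t.delayed == 0
}

func main() {
	tracker := DelayedProbeTracker{cutoff: 2 * time.Second}
	tracker.Record(150 * time.Millisecond)
	tracker.Record(3 * time.Second) // a long-delayed measurement
	fmt.Println("stable:", tracker.Stable())
}
```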
Diffstat (limited to 'ms')
| mode | file | lines removed |
|---|---|---|
| -rw-r--r-- | ms/ms.go | 371 |
| -rw-r--r-- | ms/ms_test.go | 431 |
2 files changed, 0 insertions, 802 deletions
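
For context on the trimmed-mean change mentioned in the commit message: the diff below removes a `DoubleSidedTrim` that sorts a copy of the series and drops `percent` percent of the elements from each end. A single-sided trim drops only the upper tail before averaging. A minimal sketch, assuming the trim applies to the top `percent` of sorted values; the helper name `singleSidedTrimmedMean` is illustrative, not the client's actual API:

```go
package main

import (
	"fmt"
	"sort"
)

// singleSidedTrimmedMean sorts a copy of the values, drops the top
// `percent` percent of them, and averages the remainder. Only the upper
// tail is trimmed, unlike the double-sided variant removed in this commit.
func singleSidedTrimmedMean(values []float64, percent uint) float64 {
	sorted := make([]float64, len(values))
	copy(sorted, values)
	sort.Float64s(sorted)

	drop := int(float64(len(sorted)) * float64(percent) / 100.0)
	kept := sorted[:len(sorted)-drop]

	total := 0.0
	for _, v := range kept {
		total += v
	}
	return total / float64(len(kept))
}

func main() {
	rtts := []float64{7, 2, 15, 27, 5, 52, 18, 23, 11, 12}
	fmt.Println(singleSidedTrimmedMean(rtts, 10)) // drops only the 52
}
```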
diff --git a/ms/ms.go b/ms/ms.go
deleted file mode 100644
index 06c7340..0000000
--- a/ms/ms.go
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * This file is part of Go Responsiveness.
- *
- * Go Responsiveness is free software: you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software Foundation,
- * either version 2 of the License, or (at your option) any later version.
- * Go Responsiveness is distributed in the hope that it will be useful, but WITHOUT ANY
- * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
- * PARTICULAR PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with Go Responsiveness. If not, see <https://www.gnu.org/licenses/>.
- */
-
-package ms
-
-import (
-    "fmt"
-    "math"
-    "sort"
-
-    "github.com/network-quality/goresponsiveness/saturating"
-    "github.com/network-quality/goresponsiveness/utilities"
-    "golang.org/x/exp/constraints"
-)
-
-type MathematicalSeries[T constraints.Float | constraints.Integer] interface {
-    AddElement(T)
-    CalculateAverage() float64
-    AllSequentialIncreasesLessThan(float64) (bool, float64)
-    StandardDeviation() (bool, T)
-    IsNormallyDistributed() bool
-    Len() int
-    Values() []T
-    Percentile(int) T
-    DoubleSidedTrim(uint) MathematicalSeries[T]
-    Less(int, int) bool
-    Swap(int, int)
-}
-
-func calculateAverage[T constraints.Integer | constraints.Float](elements []T) float64 {
-    total := T(0)
-    for i := 0; i < len(elements); i++ {
-        total += elements[i]
-    }
-    return float64(total) / float64(len(elements))
-}
-
-func calculatePercentile[T constraints.Integer | constraints.Float](
-    elements []T,
-    p int,
-) (result T) {
-    result = T(0)
-    if p < 0 || p > 100 {
-        return
-    }
-
-    sort.Slice(elements, func(l int, r int) bool { return elements[l] < elements[r] })
-    pindex := int64((float64(p) / float64(100)) * float64(len(elements)))
-    result = elements[pindex]
-    return
-}
-
-type InfiniteMathematicalSeries[T constraints.Float | constraints.Integer] struct {
-    elements []T
-}
-
-func NewInfiniteMathematicalSeries[T constraints.Float | constraints.Integer]() MathematicalSeries[T] {
-    return &InfiniteMathematicalSeries[T]{}
-}
-
-func (ims *InfiniteMathematicalSeries[T]) Swap(i, j int) {
-    ims.elements[i], ims.elements[j] = ims.elements[j], ims.elements[i]
-}
-
-func (ims *InfiniteMathematicalSeries[T]) Less(i, j int) bool {
-    return ims.elements[i] < ims.elements[j]
-}
-
-func (ims *InfiniteMathematicalSeries[T]) DoubleSidedTrim(percent uint) MathematicalSeries[T] {
-    if percent >= 100 {
-        panic(
-            fmt.Sprintf("Cannot perform double-sided trim for an invalid percentage: %d", percent),
-        )
-    }
-
-    trimmed := &InfiniteMathematicalSeries[T]{}
-    trimmed.elements = make([]T, ims.Len())
-    copy(trimmed.elements, ims.elements)
-
-    sort.Sort(trimmed)
-
-    elementsToTrim := uint64(float32(ims.Len()) * ((float32(percent)) / float32(100.0)))
-    trimmed.elements = trimmed.elements[elementsToTrim : len(trimmed.elements)-int(elementsToTrim)]
-
-    return trimmed
-}
-
-func (ims *InfiniteMathematicalSeries[T]) Copy() MathematicalSeries[T] {
-    newIms := InfiniteMathematicalSeries[T]{}
-    newIms.elements = make([]T, ims.Len())
-    copy(newIms.elements, ims.elements)
-    return &newIms
-}
-
-func (ims *InfiniteMathematicalSeries[T]) AddElement(element T) {
-    ims.elements = append(ims.elements, element)
-}
-
-func (ims *InfiniteMathematicalSeries[T]) CalculateAverage() float64 {
-    return calculateAverage(ims.elements)
-}
-
-func (ims *InfiniteMathematicalSeries[T]) AllSequentialIncreasesLessThan(
-    limit float64,
-) (bool, float64) {
-    if len(ims.elements) < 2 {
-        return false, 0.0
-    }
-
-    maximumSequentialIncrease := float64(0)
-    for i := 1; i < len(ims.elements); i++ {
-        current := ims.elements[i]
-        previous := ims.elements[i-1]
-        percentChange := utilities.SignedPercentDifference(current, previous)
-        if percentChange > limit {
-            return false, percentChange
-        }
-        if percentChange > float64(maximumSequentialIncrease) {
-            maximumSequentialIncrease = percentChange
-        }
-    }
-    return true, maximumSequentialIncrease
-}
-
-/*
- * N.B.: Overflow is possible -- use at your discretion!
- */
-func (ims *InfiniteMathematicalSeries[T]) StandardDeviation() (bool, T) {
-    // From https://www.mathsisfun.com/data/standard-deviation-calculator.html
-    // Yes, for real!
-
-    // Calculate the average of the numbers ...
-    average := ims.CalculateAverage()
-
-    // Calculate the square of each of the elements' differences from the mean.
-    differences_squared := make([]float64, len(ims.elements))
-    for index, value := range ims.elements {
-        differences_squared[index] = math.Pow(float64(value-T(average)), 2)
-    }
-
-    // The variance is the average of the squared differences.
-    // So, we need to ...
-
-    // Accumulate all those squared differences.
-    sds := float64(0)
-    for _, dss := range differences_squared {
-        sds += dss
-    }
-
-    // And then divide that total by the number of elements
-    variance := sds / float64(len(ims.elements))
-
-    // Finally, the standard deviation is the square root
-    // of the variance.
-    sd := T(math.Sqrt(variance))
-    // sd := T(variance)
-
-    return true, sd
-}
-
-func (ims *InfiniteMathematicalSeries[T]) IsNormallyDistributed() bool {
-    return false
-}
-
-func (ims *InfiniteMathematicalSeries[T]) Len() int {
-    return len(ims.elements)
-}
-
-func (ims *InfiniteMathematicalSeries[T]) Values() []T {
-    return ims.elements
-}
-
-func (ims *InfiniteMathematicalSeries[T]) Percentile(p int) T {
-    return calculatePercentile(ims.elements, p)
-}
-
-type CappedMathematicalSeries[T constraints.Float | constraints.Integer] struct {
-    elements_count uint
-    elements       []T
-    index          uint
-    divisor        *saturating.Saturating[uint]
-}
-
-func NewCappedMathematicalSeries[T constraints.Float | constraints.Integer](
-    instants_count uint,
-) MathematicalSeries[T] {
-    return &CappedMathematicalSeries[T]{
-        elements:       make([]T, instants_count),
-        elements_count: instants_count,
-        divisor:        saturating.NewSaturating(instants_count),
-        index:          0,
-    }
-}
-
-func (ma *CappedMathematicalSeries[T]) AddElement(measurement T) {
-    ma.elements[ma.index] = measurement
-    ma.divisor.Add(1)
-    // Invariant: ma.index always points to the oldest measurement
-    ma.index = (ma.index + 1) % ma.elements_count
-}
-
-func (ma *CappedMathematicalSeries[T]) CalculateAverage() float64 {
-    // If we do not yet have all the values, then we know that the values
-    // exist between 0 and ma.divisor.Value(). If we do have all the values,
-    // we know that they, too, exist between 0 and ma.divisor.Value().
-    return calculateAverage(ma.elements[0:ma.divisor.Value()])
-}
-
-func (ma *CappedMathematicalSeries[T]) AllSequentialIncreasesLessThan(
-    limit float64,
-) (_ bool, maximumSequentialIncrease float64) {
-    // If we have not yet accumulated a complete set of intervals,
-    // this is false.
-    if ma.divisor.Value() != ma.elements_count {
-        return false, 0
-    }
-
-    // Invariant: ma.index always points to the oldest (see AddMeasurement
-    // above)
-    oldestIndex := ma.index
-    previous := ma.elements[oldestIndex]
-    maximumSequentialIncrease = 0
-    for i := uint(1); i < ma.elements_count; i++ {
-        currentIndex := (oldestIndex + i) % ma.elements_count
-        current := ma.elements[currentIndex]
-        percentChange := utilities.SignedPercentDifference(current, previous)
-        previous = current
-        if percentChange > limit {
-            return false, percentChange
-        }
-    }
-    return true, maximumSequentialIncrease
-}
-
-/*
- * N.B.: Overflow is possible -- use at your discretion!
- */
-func (ma *CappedMathematicalSeries[T]) StandardDeviation() (bool, T) {
-    // If we have not yet accumulated a complete set of intervals,
-    // we are always false.
-    if ma.divisor.Value() != ma.elements_count {
-        return false, T(0)
-    }
-
-    // From https://www.mathsisfun.com/data/standard-deviation-calculator.html
-    // Yes, for real!
-
-    // Calculate the average of the numbers ...
-    average := ma.CalculateAverage()
-
-    // Calculate the square of each of the elements' differences from the mean.
-    differences_squared := make([]float64, ma.elements_count)
-    for index, value := range ma.elements {
-        differences_squared[index] = math.Pow(float64(value-T(average)), 2)
-    }
-
-    // The variance is the average of the squared differences.
-    // So, we need to ...
-
-    // Accumulate all those squared differences.
-    sds := float64(0)
-    for _, dss := range differences_squared {
-        sds += dss
-    }
-
-    // And then divide that total by the number of elements
-    variance := sds / float64(ma.divisor.Value())
-
-    // Finally, the standard deviation is the square root
-    // of the variance.
-    sd := T(math.Sqrt(variance))
-    // sd := T(variance)
-
-    return true, sd
-}
-
-func (ma *CappedMathematicalSeries[T]) IsNormallyDistributed() bool {
-    valid, stddev := ma.StandardDeviation()
-    // If there are not enough values in our series to generate a standard
-    // deviation, then we cannot do this calculation either.
-    if !valid {
-        return false
-    }
-    avg := float64(ma.CalculateAverage())
-
-    fstddev := float64(stddev)
-    within := float64(0)
-    for _, v := range ma.Values() {
-        if (avg-fstddev) <= float64(v) && float64(v) <= (avg+fstddev) {
-            within++
-        }
-    }
-    return within/float64(ma.divisor.Value()) >= 0.68
-}
-
-func (ma *CappedMathematicalSeries[T]) Values() []T {
-    return ma.elements
-}
-
-func (ma *CappedMathematicalSeries[T]) Len() int {
-    if uint(len(ma.elements)) != ma.elements_count {
-        panic(
-            fmt.Sprintf(
-                "Error: A capped mathematical series' metadata is invalid: the length of its element array/slice does not match element_count! (%v vs %v)",
-                ma.elements_count,
-                len(ma.elements),
-            ),
-        )
-    }
-    return len(ma.elements)
-}
-
-func (ma *CappedMathematicalSeries[T]) Percentile(p int) T {
-    if p < 0 || p > 100 {
-        return 0
-    }
-
-    // Because we need to sort the list to perform the percentile calculation,
-    // we have to make a copy of the list so that we don't disturb
-    // the time-relative ordering of the elements.
-
-    kopy := make([]T, len(ma.elements))
-    copy(kopy, ma.elements)
-    return calculatePercentile(kopy, p)
-}
-
-func (ims *CappedMathematicalSeries[T]) Swap(i, j int) {
-    ims.elements[i], ims.elements[j] = ims.elements[j], ims.elements[i]
-}
-
-func (ims *CappedMathematicalSeries[T]) Less(i, j int) bool {
-    return ims.elements[i] < ims.elements[j]
-}
-
-func (ims *CappedMathematicalSeries[T]) DoubleSidedTrim(percent uint) MathematicalSeries[T] {
-    if percent >= 100 {
-        panic(
-            fmt.Sprintf("Cannot perform double-sided trim for an invalid percentage: %d", percent),
-        )
-    }
-
-    trimmed := &CappedMathematicalSeries[T]{elements_count: uint(ims.Len())}
-    trimmed.elements = make([]T, ims.Len())
-    copy(trimmed.elements, ims.elements)
-    sort.Sort(trimmed)
-
-    elementsToTrim := uint(float32(ims.Len()) * ((float32(percent)) / float32(100.0)))
-    trimmed.elements = trimmed.elements[elementsToTrim : len(trimmed.elements)-int(elementsToTrim)]
-
-    trimmed.elements_count -= (elementsToTrim * 2)
-
-    return trimmed
-}
-
-func (ims *CappedMathematicalSeries[T]) Copy() MathematicalSeries[T] {
-    newCms := CappedMathematicalSeries[T]{}
-    newCms.elements = make([]T, ims.Len())
-    copy(newCms.elements, ims.elements)
-    return &newCms
-}
diff --git a/ms/ms_test.go b/ms/ms_test.go
deleted file mode 100644
index 34817d0..0000000
--- a/ms/ms_test.go
+++ /dev/null
@@ -1,431 +0,0 @@
-package ms
-
-import (
-    "reflect"
-    "testing"
-
-    "github.com/network-quality/goresponsiveness/utilities"
-)
-
-func Test_InfiniteValues(test *testing.T) {
-    series := NewInfiniteMathematicalSeries[float64]()
-    shouldMatch := make([]float64, 0)
-    previous := float64(1.0)
-    for range utilities.Iota(1, 80) {
-        previous *= 1.059
-        series.AddElement(float64(previous))
-        shouldMatch = append(shouldMatch, previous)
-    }
-
-    if !reflect.DeepEqual(shouldMatch, series.Values()) {
-        test.Fatalf("Values() on infinite mathematical series does not work.")
-    }
-}
-
-func Test_InfiniteSequentialIncreasesAlwaysLessThan(test *testing.T) {
-    series := NewInfiniteMathematicalSeries[float64]()
-    previous := float64(1.0)
-    for range utilities.Iota(1, 80) {
-        previous *= 1.059
-        series.AddElement(float64(previous))
-    }
-    if islt, maxSeqIncrease := series.AllSequentialIncreasesLessThan(6.0); !islt {
-        test.Fatalf(
-            "(infinite) Sequential increases are not always less than 6.0 (%f).",
-            maxSeqIncrease,
-        )
-    }
-}
-
-func Test_CappedTooFewInstantsSequentialIncreasesLessThanAlwaysFalse(test *testing.T) {
-    series := NewCappedMathematicalSeries[float64](500)
-    series.AddElement(0.0)
-    if islt, _ := series.AllSequentialIncreasesLessThan(6.0); islt {
-        test.Fatalf(
-            "(infinite) 0 elements in a series should always yield false when asking if sequential increases are less than a value.",
-        )
-    }
-}
-
-func Test_Infinite_degenerate_percentile_too_high(test *testing.T) {
-    series := NewInfiniteMathematicalSeries[int]()
-    if series.Percentile(101) != 0 {
-        test.Fatalf("(infinite) Series percentile of 101 failed.")
-    }
-}
-
-func Test_Infinite_degenerate_percentile_too_low(test *testing.T) {
-    series := NewInfiniteMathematicalSeries[int]()
-    if series.Percentile(-1) != 0 {
-        test.Fatalf("(infinite) Series percentile of -1 failed.")
-    }
-}
-
-func Test_Infinite90_percentile(test *testing.T) {
-    var expected int64 = 10
-    series := NewInfiniteMathematicalSeries[int64]()
-    series.AddElement(10)
-    series.AddElement(9)
-    series.AddElement(8)
-    series.AddElement(7)
-    series.AddElement(6)
-    series.AddElement(5)
-    series.AddElement(4)
-    series.AddElement(3)
-    series.AddElement(2)
-    series.AddElement(1)
-
-    if series.Percentile(90) != expected {
-        test.Fatalf(
-            "(infinite) Series 90th percentile of 0 ... 10 failed: Expected: %v; Actual: %v.", expected,
-            series.Percentile(90),
-        )
-    }
-}
-
-func Test_Infinite90_percentile_reversed(test *testing.T) {
-    var expected int64 = 10
-    series := NewInfiniteMathematicalSeries[int64]()
-    series.AddElement(1)
-    series.AddElement(2)
-    series.AddElement(3)
-    series.AddElement(4)
-    series.AddElement(5)
-    series.AddElement(6)
-    series.AddElement(7)
-    series.AddElement(8)
-    series.AddElement(9)
-    series.AddElement(10)
-
-    if series.Percentile(90) != expected {
-        test.Fatalf(
-            "(infinite) Series 90th percentile of 0 ... 10 failed: Expected %v; Actual: %v.", expected,
-            series.Percentile(90),
-        )
-    }
-}
-
-func Test_Infinite50_percentile_jumbled(test *testing.T) {
-    var expected int64 = 15
-    series := NewInfiniteMathematicalSeries[int64]()
-    series.AddElement(7)
-    series.AddElement(2)
-    series.AddElement(15)
-    series.AddElement(27)
-    series.AddElement(5)
-    series.AddElement(52)
-    series.AddElement(18)
-    series.AddElement(23)
-    series.AddElement(11)
-    series.AddElement(12)
-
-    if series.Percentile(50) != expected {
-        test.Fatalf(
-            "(infinite) Series 50 percentile of a jumble of numbers failed: Expected %v; Actual: %v.", expected,
-            series.Percentile(50),
-        )
-    }
-}
-
-func Test_InfiniteDoubleSidedTrimmedMean_jumbled(test *testing.T) {
-    expected := 16
-    series := NewInfiniteMathematicalSeries[int64]()
-    series.AddElement(7)
-    series.AddElement(2)
-    series.AddElement(15)
-    series.AddElement(27)
-    series.AddElement(5)
-    series.AddElement(5)
-    series.AddElement(52)
-    series.AddElement(18)
-    series.AddElement(23)
-    series.AddElement(11)
-    series.AddElement(22)
-    series.AddElement(17)
-    series.AddElement(14)
-    series.AddElement(9)
-    series.AddElement(100)
-    series.AddElement(72)
-    series.AddElement(91)
-    series.AddElement(43)
-    series.AddElement(37)
-    series.AddElement(62)
-
-    trimmed := series.DoubleSidedTrim(10)
-
-    if trimmed.Len() != expected {
-        test.Fatalf(
-            "Capped series is not of the proper size. Expected %v and got %v",
-            expected,
-            trimmed.Len(),
-        )
-    }
-
-    prev := int64(0)
-    for _, v := range trimmed.Values() {
-        if !(prev <= v) {
-            test.Fatalf("Not sorted: %v is not less than or equal to %v\n", prev, v)
-        }
-        prev = v
-    }
-}
-
-func Test_CappedSequentialIncreasesAlwaysLessThan(test *testing.T) {
-    series := NewCappedMathematicalSeries[float64](40)
-    previous := float64(1.0)
-    for range utilities.Iota(1, 80) {
-        previous *= 1.059
-        series.AddElement(float64(previous))
-    }
-    if islt, maxSeqIncrease := series.AllSequentialIncreasesLessThan(6.0); !islt {
-        test.Fatalf("Sequential increases are not always less than 6.0 (%f).", maxSeqIncrease)
-    }
-}
-
-func Test_CappedSequentialIncreasesAlwaysLessThanWithWraparound(test *testing.T) {
-    series := NewCappedMathematicalSeries[float64](20)
-    previous := float64(1.0)
-    for range utilities.Iota(1, 20) {
-        previous *= 1.15
-        series.AddElement(float64(previous))
-    }
-
-    // All those measurements should be ejected by the following
-    // loop!
-    for range utilities.Iota(1, 20) {
-        previous *= 1.10
-        series.AddElement(float64(previous))
-    }
-
-    if islt, maxSeqIncrease := series.AllSequentialIncreasesLessThan(11.0); !islt {
-        test.Fatalf(
-            "Sequential increases are not always less than 11.0 in wraparound situation (%f v 11.0).",
-            maxSeqIncrease,
-        )
-    }
-}
-
-func Test_CappedSequentialIncreasesAlwaysLessThanWithWraparoundInverse(test *testing.T) {
-    series := NewCappedMathematicalSeries[float64](20)
-    previous := float64(1.0)
-    for range utilities.Iota(1, 20) {
-        previous *= 1.15
-        series.AddElement(float64(previous))
-    }
-
-    // *Not* all those measurements should be ejected by the following
-    // loop!
-    for range utilities.Iota(1, 15) {
-        previous *= 1.10
-        series.AddElement(float64(previous))
-    }
-
-    if islt, maxSeqIncrease := series.AllSequentialIncreasesLessThan(11.0); islt {
-        test.Fatalf(
-            "Sequential increases are (unexpectedly) always less than 11.0 in wraparound situation: %f v 11.0.",
-            maxSeqIncrease,
-        )
-    }
-}
-
-func Test_CappedStandardDeviationCalculation(test *testing.T) {
-    expected := 2.93
-    series := NewCappedMathematicalSeries[float64](5)
-    // 5.7, 1.0, 8.6, 7.4, 2.2
-    series.AddElement(5.7)
-    series.AddElement(5.7)
-    series.AddElement(5.7)
-    series.AddElement(5.7)
-    series.AddElement(5.7)
-    series.AddElement(5.7)
-    series.AddElement(5.7)
-    series.AddElement(5.7)
-    series.AddElement(5.7)
-    series.AddElement(1.0)
-    series.AddElement(8.6)
-    series.AddElement(7.4)
-    series.AddElement(2.2)
-
-    if _, sd := series.StandardDeviation(); !utilities.ApproximatelyEqual(sd, expected, 0.01) {
-        test.Fatalf("Standard deviation max calculation failed: Expected: %v; Actual: %v.", expected, sd)
-    } else {
-        test.Logf("Standard deviation calculation result: %v", sd)
-    }
-}
-
-func Test_CappedStandardDeviationCalculation2(test *testing.T) {
-    expected := 1.41
-    series := NewCappedMathematicalSeries[float64](5)
-    series.AddElement(8)
-    series.AddElement(9)
-    series.AddElement(10)
-    series.AddElement(11)
-    series.AddElement(12)
-
-    if _, sd := series.StandardDeviation(); !utilities.ApproximatelyEqual(sd, expected, 0.01) {
-        test.Fatalf("Standard deviation max calculation failed: Expected: %v; Actual: %v.", expected, sd)
-    } else {
-        test.Logf("Standard deviation calculation result: %v", sd)
-    }
-}
-
-func Test_CappedRotatingValues(test *testing.T) {
-    series := NewCappedMathematicalSeries[int](5)
-
-    series.AddElement(1)
-    series.AddElement(2)
-    series.AddElement(3)
-    series.AddElement(4)
-    series.AddElement(5)
-
-    series.AddElement(6)
-    series.AddElement(7)
-
-    if !reflect.DeepEqual([]int{6, 7, 3, 4, 5}, series.Values()) {
-        test.Fatalf("Adding values does not properly erase earlier values.")
-    }
-}
-
-func Test_CappedLen(test *testing.T) {
-    series := NewCappedMathematicalSeries[int](5)
-
-    series.AddElement(1)
-    series.AddElement(2)
-    series.AddElement(3)
-    series.AddElement(4)
-    series.AddElement(5)
-
-    series.AddElement(6)
-    series.AddElement(7)
-
-    if series.Len() != 5 {
-        test.Fatalf("Series size calculations failed.")
-    }
-}
-
-func Test_Capped_degenerate_percentile_too_high(test *testing.T) {
-    series := NewCappedMathematicalSeries[int](21)
-    if series.Percentile(101) != 0 {
-        test.Fatalf("Series percentile of 101 failed.")
-    }
-}
-
-func Test_Capped_degenerate_percentile_too_low(test *testing.T) {
-    series := NewCappedMathematicalSeries[int](21)
-    if series.Percentile(-1) != 0 {
-        test.Fatalf("Series percentile of -1 failed.")
-    }
-}
-
-func Test_Capped90_percentile(test *testing.T) {
-    var expected int = 10
-    series := NewCappedMathematicalSeries[int](10)
-    series.AddElement(10)
-    series.AddElement(9)
-    series.AddElement(8)
-    series.AddElement(7)
-    series.AddElement(6)
-    series.AddElement(5)
-    series.AddElement(4)
-    series.AddElement(3)
-    series.AddElement(2)
-    series.AddElement(1)
-
-    if series.Percentile(90) != expected {
-        test.Fatalf(
-            "Series 90th percentile of 0 ... 10 failed: Expected %v got %v.", expected,
-            series.Percentile(90),
-        )
-    }
-}
-
-func Test_Capped90_percentile_reversed(test *testing.T) {
-    series := NewCappedMathematicalSeries[int64](10)
-    series.AddElement(1)
-    series.AddElement(2)
-    series.AddElement(3)
-    series.AddElement(4)
-    series.AddElement(5)
-    series.AddElement(6)
-    series.AddElement(7)
-    series.AddElement(8)
-    series.AddElement(9)
-    series.AddElement(10)
-
-    if series.Percentile(90) != 10 {
-        test.Fatalf(
-            "Series 90th percentile of 0 ... 10 failed: Expected 10 got %v.",
-            series.Percentile(90),
-        )
-    }
-}
-
-func Test_Capped50_percentile_jumbled(test *testing.T) {
-    var expected int64 = 15
-    series := NewCappedMathematicalSeries[int64](10)
-    series.AddElement(7)
-    series.AddElement(2)
-    series.AddElement(15)
-    series.AddElement(27)
-    series.AddElement(5)
-    series.AddElement(52)
-    series.AddElement(18)
-    series.AddElement(23)
-    series.AddElement(11)
-    series.AddElement(12)
-
-    if series.Percentile(50) != expected {
-        test.Fatalf(
-            "Series 50 percentile of a jumble of numbers failed: Expected %v got %v.", expected,
-            series.Percentile(50),
-        )
-    }
-}
-
-func Test_CappedDoubleSidedTrimmedMean_jumbled(test *testing.T) {
-    expected := 8
-    series := NewCappedMathematicalSeries[int64](10)
-    series.AddElement(7)
-    series.AddElement(2)
-    series.AddElement(15)
-    series.AddElement(27)
-    series.AddElement(5)
-    series.AddElement(5)
-    series.AddElement(52)
-    series.AddElement(18)
-    series.AddElement(23)
-    series.AddElement(11)
-    series.AddElement(12)
-
-    trimmed := series.DoubleSidedTrim(10)
-
-    if trimmed.Len() != expected {
-        test.Fatalf(
-            "Capped series is not of the proper size. Expected %v and got %v",
-            expected,
-            trimmed.Len(),
-        )
-    }
-
-    prev := int64(0)
-    for _, v := range trimmed.Values() {
-        if !(prev <= v) {
-            test.Fatalf("Not sorted: %v is not less than or equal to %v\n", prev, v)
-        }
-        prev = v
-    }
-}
-
-func Test_CappedAverage(test *testing.T) {
-    expected := 1.0082230220488836e+08
-    series := NewCappedMathematicalSeries[float64](4)
-    series.AddElement(9.94747772516195e+07)
-    series.AddElement(9.991286984703423e+07)
-    series.AddElement(1.0285437111086299e+08)
-    series.AddElement(1.0104719061003672e+08)
-    if average := series.CalculateAverage(); !utilities.ApproximatelyEqual(average, 0.01, expected) {
-        test.Fatalf(
-            "Expected: %v; Actual: %v.", average, expected,
-        )
-    }
-}
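
One detail worth noting in the removed `calculatePercentile` helper: after sorting, it indexes at `int64((float64(p) / float64(100)) * float64(len(elements)))`, so for the ten-element series used throughout the tests the 90th percentile lands on index 9, the largest element, which is exactly what the `*90_percentile` tests assert. A small worked check of that index arithmetic:

```go
package main

import "fmt"

func main() {
	// Index arithmetic used by the removed calculatePercentile helper:
	// pindex = (p / 100) * len(elements)
	p, n := 90, 10
	pindex := int64((float64(p) / float64(100)) * float64(n))
	fmt.Println(pindex) // 9 -> the largest element of a sorted 10-element slice
}
```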
